author    Kentoku SHIBA <kentokushiba@gmail.com>    2021-04-28 16:45:50 +0900
committer GitHub <noreply@github.com>               2021-04-28 16:45:50 +0900
commit    977115add60f0f9d6258e5ebcb512a1c97492691 (patch)
tree      6c5dff26ceecebc6607a180b98b8711b88dd25f7
parent    b5d4964d1e56f91a0f129e72e850ed6220c52002 (diff)
parent    4cd92143eae9b397589e5b449d1a85c43b3e4f6b (diff)
download  mariadb-git-bb-10.4-MDEV-22265.tar.gz
Merge branch '10.4' into bb-10.4-MDEV-22265 (bb-10.4-MDEV-22265)
-rwxr-xr-xBUILD/SETUP.sh2
-rwxr-xr-xBUILD/compile-pentium64-ubsan29
-rw-r--r--CMakeLists.txt21
-rw-r--r--CREDITS13
-rw-r--r--EXCEPTIONS-CLIENT136
-rw-r--r--VERSION2
-rw-r--r--client/mysql.cc6
-rw-r--r--client/mysqldump.c103
-rw-r--r--client/mysqlslap.c3
-rw-r--r--client/mysqltest.cc2
-rw-r--r--cmake/configure.pl10
-rw-r--r--cmake/install_macros.cmake2
-rw-r--r--cmake/maintainer.cmake6
-rw-r--r--cmake/make_dist.cmake.in7
-rw-r--r--cmake/os/WindowsCache.cmake1
-rw-r--r--config.h.cmake1
-rw-r--r--configure.cmake2
-rwxr-xr-xdebian/autobake-deb.sh15
-rw-r--r--debian/control2
-rw-r--r--debian/libmariadb3.install1
-rw-r--r--debian/mariadb-server-10.4.postinst4
-rw-r--r--extra/mariabackup/backup_copy.cc14
-rw-r--r--extra/mariabackup/backup_mysql.cc8
-rw-r--r--extra/mariabackup/datasink.h6
-rw-r--r--extra/mariabackup/ds_archive.cc1
-rw-r--r--extra/mariabackup/ds_buffer.cc1
-rw-r--r--extra/mariabackup/ds_compress.cc1
-rw-r--r--extra/mariabackup/ds_local.cc6
-rw-r--r--extra/mariabackup/ds_stdout.cc1
-rw-r--r--extra/mariabackup/ds_tmpfile.cc1
-rw-r--r--extra/mariabackup/ds_xbstream.cc1
-rw-r--r--extra/mariabackup/innobackupex.cc14
-rw-r--r--extra/mariabackup/xtrabackup.cc99
-rw-r--r--extra/mariabackup/xtrabackup.h2
-rw-r--r--extra/my_print_defaults.c2
-rw-r--r--extra/wolfssl/CMakeLists.txt57
m---------extra/wolfssl/wolfssl0
-rw-r--r--include/byte_order_generic.h8
-rw-r--r--include/byte_order_generic_x86.h1
-rw-r--r--include/json_lib.h1
-rw-r--r--include/my_base.h6
-rw-r--r--include/my_byteorder.h4
-rw-r--r--include/my_compare.h11
-rw-r--r--include/my_pthread.h1
-rw-r--r--include/myisampack.h10
-rw-r--r--include/mysql/plugin_ftparser.h4
-rw-r--r--include/mysql/service_wsrep.h10
-rw-r--r--include/mysql_com.h2
-rw-r--r--include/service_versions.h4
-rw-r--r--include/ssl_compat.h6
m---------libmariadb0
-rw-r--r--libmysqld/lib_sql.cc3
-rw-r--r--man/mysqlbinlog.121
-rw-r--r--man/mysqldump.19
-rw-r--r--mysql-test/collections/smoke_test146
-rw-r--r--mysql-test/include/ctype_utf8mb4.inc2
-rw-r--r--mysql-test/include/galera_no_debug_sync.inc9
-rw-r--r--mysql-test/include/icp_tests.inc12
-rw-r--r--mysql-test/include/not_asan.inc8
-rw-r--r--mysql-test/include/not_ubsan.inc8
-rw-r--r--mysql-test/lib/My/Debugger.pm266
-rw-r--r--mysql-test/lib/mtr_report.pm30
-rw-r--r--mysql-test/main/alter_table.result17
-rw-r--r--mysql-test/main/alter_table.test12
-rw-r--r--mysql-test/main/backup_lock_binlog.result40
-rw-r--r--mysql-test/main/backup_lock_binlog.test49
-rw-r--r--mysql-test/main/brackets.result16
-rw-r--r--mysql-test/main/check_constraint.result12
-rw-r--r--mysql-test/main/check_constraint.test8
-rw-r--r--mysql-test/main/contributors.result13
-rw-r--r--mysql-test/main/create.result36
-rw-r--r--mysql-test/main/create.test154
-rw-r--r--mysql-test/main/cte_nonrecursive.result85
-rw-r--r--mysql-test/main/cte_nonrecursive.test60
-rw-r--r--mysql-test/main/cte_nonrecursive_not_embedded.result48
-rw-r--r--mysql-test/main/cte_nonrecursive_not_embedded.test58
-rw-r--r--mysql-test/main/cte_recursive.result271
-rw-r--r--mysql-test/main/cte_recursive.test129
-rw-r--r--mysql-test/main/ctype_utf16.result2
-rw-r--r--mysql-test/main/ctype_utf16le.result2
-rw-r--r--mysql-test/main/ctype_utf32.result4
-rw-r--r--mysql-test/main/ctype_utf8.result4
-rw-r--r--mysql-test/main/ctype_utf8mb4.result10
-rw-r--r--mysql-test/main/ctype_utf8mb4.test2
-rw-r--r--mysql-test/main/ctype_utf8mb4_heap.result4
-rw-r--r--mysql-test/main/ctype_utf8mb4_innodb.result4
-rw-r--r--mysql-test/main/ctype_utf8mb4_myisam.result8
-rw-r--r--mysql-test/main/deadlock_ftwrl.result21
-rw-r--r--mysql-test/main/deadlock_ftwrl.test36
-rw-r--r--mysql-test/main/default.result15
-rw-r--r--mysql-test/main/default.test19
-rw-r--r--mysql-test/main/derived_cond_pushdown.result284
-rw-r--r--mysql-test/main/derived_cond_pushdown.test50
-rw-r--r--mysql-test/main/derived_opt.result27
-rw-r--r--mysql-test/main/derived_opt.test33
-rw-r--r--mysql-test/main/empty_string_literal.result29
-rw-r--r--mysql-test/main/empty_string_literal.test19
-rw-r--r--mysql-test/main/flush_and_binlog.result33
-rw-r--r--mysql-test/main/flush_and_binlog.test43
-rw-r--r--mysql-test/main/func_gconcat.result12
-rw-r--r--mysql-test/main/func_gconcat.test10
-rw-r--r--mysql-test/main/func_group.result38
-rw-r--r--mysql-test/main/func_group.test34
-rw-r--r--mysql-test/main/func_like.result18
-rw-r--r--mysql-test/main/func_like.test29
-rw-r--r--mysql-test/main/gis-json.result7
-rw-r--r--mysql-test/main/gis-json.test2
-rw-r--r--mysql-test/main/gis-precise.result111
-rw-r--r--mysql-test/main/gis-precise.test78
-rw-r--r--mysql-test/main/group_by.result77
-rw-r--r--mysql-test/main/group_by.test60
-rw-r--r--mysql-test/main/group_min_max.result2
-rw-r--r--mysql-test/main/having.result33
-rw-r--r--mysql-test/main/having.test21
-rw-r--r--mysql-test/main/index_merge_myisam.result3
-rw-r--r--mysql-test/main/index_merge_myisam.test4
-rw-r--r--mysql-test/main/information_schema.result57
-rw-r--r--mysql-test/main/information_schema.test48
-rw-r--r--mysql-test/main/innodb_ext_key.result19
-rw-r--r--mysql-test/main/innodb_ext_key.test24
-rw-r--r--mysql-test/main/innodb_icp.result12
-rw-r--r--mysql-test/main/join_cache.result88
-rw-r--r--mysql-test/main/join_cache.test63
-rw-r--r--mysql-test/main/join_outer.result103
-rw-r--r--mysql-test/main/join_outer.test122
-rw-r--r--mysql-test/main/join_outer_jcl6.result103
-rw-r--r--mysql-test/main/kill.result5
-rw-r--r--mysql-test/main/kill.test6
-rw-r--r--mysql-test/main/lock_user.result2
-rw-r--r--mysql-test/main/lock_view.result34
-rw-r--r--mysql-test/main/lock_view.test17
-rw-r--r--mysql-test/main/long_unique.result2
-rw-r--r--mysql-test/main/long_unique_bugs.result2
-rw-r--r--mysql-test/main/mix2_myisam.result2
-rw-r--r--mysql-test/main/multi_update.result53
-rw-r--r--mysql-test/main/multi_update.test53
-rw-r--r--mysql-test/main/myisam.result15
-rw-r--r--mysql-test/main/myisam.test1
-rw-r--r--mysql-test/main/myisam_icp.result12
-rw-r--r--mysql-test/main/mysql_upgrade.result84
-rw-r--r--mysql-test/main/mysql_upgrade.test34
-rw-r--r--mysql-test/main/mysqld--help.test2
-rw-r--r--mysql-test/main/mysqldump-nl.test8
-rw-r--r--mysql-test/main/mysqldump-system.result20
-rw-r--r--mysql-test/main/mysqldump.result315
-rw-r--r--mysql-test/main/mysqldump.test37
-rw-r--r--mysql-test/main/opt_trace.result9
-rw-r--r--mysql-test/main/opt_trace.test13
-rw-r--r--mysql-test/main/order_by.result22
-rw-r--r--mysql-test/main/order_by.test8
-rw-r--r--mysql-test/main/parser.result9
-rw-r--r--mysql-test/main/parser.test15
-rw-r--r--mysql-test/main/parser_not_embedded.test2
-rw-r--r--mysql-test/main/password_expiration.result5
-rw-r--r--mysql-test/main/precedence.result4
-rw-r--r--mysql-test/main/precedence.test3
-rw-r--r--mysql-test/main/processlist_notembedded.result16
-rw-r--r--mysql-test/main/processlist_notembedded.test20
-rw-r--r--mysql-test/main/ps.result98
-rw-r--r--mysql-test/main/ps.test81
-rw-r--r--mysql-test/main/ps_show_log.result65
-rw-r--r--mysql-test/main/ps_show_log.test73
-rw-r--r--mysql-test/main/query_cache.result28
-rw-r--r--mysql-test/main/query_cache.test10
-rw-r--r--mysql-test/main/range.result53
-rw-r--r--mysql-test/main/range.test59
-rw-r--r--mysql-test/main/range_mrr_icp.result53
-rw-r--r--mysql-test/main/range_vs_index_merge.result8
-rw-r--r--mysql-test/main/range_vs_index_merge.test8
-rw-r--r--mysql-test/main/range_vs_index_merge_innodb.result8
-rw-r--r--mysql-test/main/selectivity.result58
-rw-r--r--mysql-test/main/selectivity.test45
-rw-r--r--mysql-test/main/selectivity_innodb.result60
-rw-r--r--mysql-test/main/set_statement.result26
-rw-r--r--mysql-test/main/set_statement.test26
-rw-r--r--mysql-test/main/skip_grants.opt (renamed from mysql-test/main/skip_grants-master.opt)0
-rw-r--r--mysql-test/main/skip_grants.result31
-rw-r--r--mysql-test/main/skip_grants.test46
-rw-r--r--mysql-test/main/sp-ucs2.result17
-rw-r--r--mysql-test/main/sp-ucs2.test19
-rw-r--r--mysql-test/main/stat_tables.result14
-rw-r--r--mysql-test/main/stat_tables.test12
-rw-r--r--mysql-test/main/stat_tables_innodb.result14
-rw-r--r--mysql-test/main/status2.result10
-rw-r--r--mysql-test/main/status2.test11
-rw-r--r--mysql-test/main/subselect.result38
-rw-r--r--mysql-test/main/subselect.test35
-rw-r--r--mysql-test/main/subselect4.result98
-rw-r--r--mysql-test/main/subselect4.test85
-rw-r--r--mysql-test/main/subselect_exists2in.result18
-rw-r--r--mysql-test/main/subselect_exists2in.test23
-rw-r--r--mysql-test/main/subselect_mat_cost_bugs.result8
-rw-r--r--mysql-test/main/subselect_mat_cost_bugs.test8
-rw-r--r--mysql-test/main/subselect_no_exists_to_in.result38
-rw-r--r--mysql-test/main/subselect_no_mat.result38
-rw-r--r--mysql-test/main/subselect_no_opts.result38
-rw-r--r--mysql-test/main/subselect_no_scache.result38
-rw-r--r--mysql-test/main/subselect_no_semijoin.result38
-rw-r--r--mysql-test/main/system_mysql_db.result2
-rw-r--r--mysql-test/main/system_mysql_db_507.result1
-rw-r--r--mysql-test/main/system_mysql_db_fix40123.result2
-rw-r--r--mysql-test/main/system_mysql_db_fix50030.result2
-rw-r--r--mysql-test/main/system_mysql_db_fix50117.result2
-rw-r--r--mysql-test/main/table_elim.result2
-rw-r--r--mysql-test/main/table_value_constr.result449
-rw-r--r--mysql-test/main/table_value_constr.test279
-rw-r--r--mysql-test/main/type_decimal.result89
-rw-r--r--mysql-test/main/type_decimal.test44
-rw-r--r--mysql-test/main/type_year.result75
-rw-r--r--mysql-test/main/type_year.test38
-rw-r--r--mysql-test/main/union.result34
-rw-r--r--mysql-test/main/union.test18
-rw-r--r--mysql-test/main/upgrade_MDEV-23102-1.test1
-rw-r--r--mysql-test/main/upgrade_MDEV-23102-2.test1
-rw-r--r--mysql-test/main/upgrade_mdev_24363.result129
-rw-r--r--mysql-test/main/upgrade_mdev_24363.test71
-rw-r--r--mysql-test/main/user_limits.result27
-rw-r--r--mysql-test/main/user_limits.test26
-rw-r--r--mysql-test/main/userstat.result15
-rw-r--r--mysql-test/main/userstat.test22
-rw-r--r--mysql-test/main/view.result82
-rw-r--r--mysql-test/main/view.test89
-rw-r--r--mysql-test/main/win.result26
-rw-r--r--mysql-test/main/win.test20
-rwxr-xr-xmysql-test/mysql-test-run.pl797
-rw-r--r--mysql-test/std_data/galera_certs/INFORMATION2
-rw-r--r--mysql-test/std_data/galera_certs/galera.1.crt25
-rw-r--r--mysql-test/std_data/galera_certs/galera.1.csr16
-rw-r--r--mysql-test/std_data/galera_certs/galera.1.key27
-rw-r--r--mysql-test/std_data/galera_certs/galera.2.crt25
-rw-r--r--mysql-test/std_data/galera_certs/galera.2.csr16
-rw-r--r--mysql-test/std_data/galera_certs/galera.2.key27
-rw-r--r--mysql-test/std_data/galera_certs/galera.root.crt32
-rw-r--r--mysql-test/std_data/galera_certs/galera.root.key54
-rw-r--r--mysql-test/std_data/galera_certs/galera.root.srl1
-rw-r--r--mysql-test/suite/binlog/disabled.def2
-rw-r--r--mysql-test/suite/binlog/include/binlog_write_error.inc108
-rw-r--r--mysql-test/suite/binlog/include/binlog_xa_recover.inc281
-rw-r--r--mysql-test/suite/binlog/r/binlog_checkpoint_flush_logs.result52
-rw-r--r--mysql-test/suite/binlog/r/binlog_spurious_ddl_errors.result7
-rw-r--r--mysql-test/suite/binlog/r/binlog_statement_insert_delayed.result1
-rw-r--r--mysql-test/suite/binlog/r/binlog_truncate_innodb.result185
-rw-r--r--mysql-test/suite/binlog/r/binlog_write_error.result25
-rw-r--r--mysql-test/suite/binlog/r/binlog_xa_recover.result17
-rw-r--r--mysql-test/suite/binlog/t/binlog_checkpoint_flush_logs.test79
-rw-r--r--mysql-test/suite/binlog/t/binlog_spurious_ddl_errors-master.opt1
-rw-r--r--mysql-test/suite/binlog/t/binlog_spurious_ddl_errors.test1
-rw-r--r--mysql-test/suite/binlog/t/binlog_statement_insert_delayed.test1
-rw-r--r--mysql-test/suite/binlog/t/binlog_truncate_innodb-master.opt1
-rw-r--r--mysql-test/suite/binlog/t/binlog_truncate_innodb.test3
-rw-r--r--mysql-test/suite/binlog/t/binlog_write_error.test83
-rw-r--r--mysql-test/suite/binlog/t/binlog_xa_recover.opt (renamed from mysql-test/suite/binlog/t/binlog_xa_recover-master.opt)0
-rw-r--r--mysql-test/suite/binlog/t/binlog_xa_recover.test278
-rw-r--r--mysql-test/suite/binlog_encryption/binlog_write_error.result25
-rw-r--r--mysql-test/suite/binlog_encryption/binlog_write_error.test2
-rw-r--r--mysql-test/suite/binlog_encryption/binlog_xa_recover-master.opt1
-rw-r--r--mysql-test/suite/binlog_encryption/binlog_xa_recover.result18
-rw-r--r--mysql-test/suite/binlog_encryption/binlog_xa_recover.test2
-rw-r--r--mysql-test/suite/binlog_encryption/rpl_semi_sync.result19
-rw-r--r--mysql-test/suite/compat/oracle/r/table_value_constr.result4
-rw-r--r--mysql-test/suite/encryption/r/innodb-checksum-algorithm.result18
-rw-r--r--mysql-test/suite/encryption/r/innodb_encrypt_freed.result100
-rw-r--r--mysql-test/suite/encryption/r/tempfiles_encrypted.result26
-rw-r--r--mysql-test/suite/encryption/t/innodb-discard-import-change.combinations5
-rw-r--r--mysql-test/suite/encryption/t/innodb-discard-import.combinations5
-rw-r--r--mysql-test/suite/encryption/t/innodb_encrypt_freed.opt5
-rw-r--r--mysql-test/suite/encryption/t/innodb_encrypt_freed.test121
-rw-r--r--mysql-test/suite/encryption/t/innodb_encryption.test2
-rw-r--r--mysql-test/suite/engines/funcs/disabled.def95
-rw-r--r--mysql-test/suite/engines/funcs/r/crash_manycolumns_string.result5
-rw-r--r--mysql-test/suite/engines/funcs/r/ix_unique_lob.result16
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl000011.result16
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl000017.result18
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_000010.result (renamed from mysql-test/suite/engines/funcs/r/rpl000010.result)12
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_000011.result18
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_000013.result (renamed from mysql-test/suite/engines/funcs/r/rpl000013.result)19
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_000015.result216
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_000017.result (renamed from mysql-test/suite/engines/funcs/t/rpl000017.test)17
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_LD_INFILE.result14
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_REDIRECT.result41
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_alter,innodb.rdiff72
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_alter.result125
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_alter_db.result19
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_bit.result13
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_bit_npk.result15
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_change_master.result29
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_create_database.result48
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_do_grant.result310
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_drop.result13
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_drop_db.result25
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_dual_pos_advance.result22
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_empty_master_crash.result12
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_err_ignoredtable.result21
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_flushlog_loop.result65
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_free_items.result12
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_get_lock.result15
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_ignore_grant.result24
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_ignore_revoke.result26
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_ignore_table_update.result14
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_init_slave.result20
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_insert.result16
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_insert_select.result16
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_loaddata2.result12
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_loaddata_m.result15
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_loaddata_s.result17
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_loaddatalocal.result18
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_loadfile.result14
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_log_pos.result51
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_many_optimize.result10
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_master_pos_wait.result18
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_misc_functions.result61
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_multi_delete.result12
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_multi_delete2.result17
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_multi_update4.result15
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_ps.result40
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_rbr_to_sbr.result55
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_relayspace.result23
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_replicate_ignore_db.result14
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_row_NOW.result13
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_row_USER.result21
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_row_drop.result39
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_row_func001.result14
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_row_inexist_tbl.result60
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_row_max_relay_size.result35
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_row_reset_slave.result49
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_row_sp001.result18
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_row_sp005.result30
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_row_sp008.result19
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_row_sp009.result17
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_row_sp010.result15
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_row_sp011.result13
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_row_sp012.result28
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_row_stop_middle.result19
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_row_trig001.result18
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_row_trig002.result18
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_row_trig003.result19
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_row_until.result37
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_row_view01.result17
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_server_id1.result28
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_server_id2.result35
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_session_var.result29
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_sf.result9
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_skip_error.result14
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_slave_status.result107
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_sp,myisam,mix.rdiff479
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_sp.result1029
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_sp004.result17
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_sp_effects.result96
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_start_stop_slave.result23
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_stm_max_relay_size.result35
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_stm_mystery22.result22
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_stm_no_op.result33
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_stm_reset_slave.result49
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_switch_stm_row_mixed.result116
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_temp_table.result29
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_temporary.result157
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_trigger.result186
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_trunc_temp.result16
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_user_variables.result243
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_variables.result9
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_view.result66
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl000010-slave.opt1
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl000010.test19
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl000011.test17
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl000013.test61
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl000017-slave.opt1
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_000010.test2
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_000011.test18
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_000013.test2
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_000015.test43
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_000017.test2
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_LD_INFILE.test38
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_REDIRECT.test47
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_alter.test24
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_alter_db.test12
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_bit.test93
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_bit_npk.test116
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_change_master.test37
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_create_database.test72
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_do_grant.test98
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_drop.test16
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_drop_db.test61
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_dual_pos_advance.test17
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_empty_master_crash.test15
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_err_ignoredtable.test68
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_flushlog_loop.test29
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_free_items.test22
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_get_lock.test49
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_ignore_grant.test59
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_ignore_revoke.test51
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_ignore_table_update.test38
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_init_slave.test34
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_insert.test44
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_insert_select.test7
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_loaddata2.test5
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_loaddata_m.test52
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_loaddata_s.test30
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_loaddatalocal.test19
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_loadfile.test6
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_log_pos.test58
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_many_optimize.test22
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_master_pos_wait.test18
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_misc_functions.test46
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_multi_delete.test26
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_multi_delete2.test68
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_multi_update4.test45
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_ps.test49
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_rbr_to_sbr.test47
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_relayspace.test34
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_replicate_ignore_db.test30
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_row_NOW.test74
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_row_USER.test57
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_row_drop.test48
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_row_func001.test57
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_row_inexist_tbl.test21
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_row_max_relay_size.test4
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_row_sp001.test146
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_row_sp005.test108
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_row_sp008.test57
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_row_sp009.test102
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_row_sp010.test80
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_row_sp011.test111
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_row_sp012.test75
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_row_stop_middle.test8
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_row_trig001.test100
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_row_trig002.test82
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_row_trig003.test152
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_row_until.test126
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_row_view01.test82
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_server_id1.test27
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_server_id2.test26
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_session_var.test42
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_sf.test3
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_skip_error.test13
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_slave_status.test58
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_sp.test496
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_sp004.test97
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_sp_effects.test208
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_start_stop_slave.test23
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_stm_mystery22.test20
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_stm_no_op.test93
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_switch_stm_row_mixed.test565
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_temp_table.test69
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_temporary.test235
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_trigger.test107
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_trunc_temp.test35
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_user_variables.test57
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_variables.test3
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_view-slave.opt1
-rw-r--r--mysql-test/suite/engines/funcs/t/rpl_view.test155
-rw-r--r--mysql-test/suite/engines/iuds/r/insert_decimal.result17
-rw-r--r--mysql-test/suite/federated/assisted_discovery.result15
-rw-r--r--mysql-test/suite/federated/assisted_discovery.test21
-rw-r--r--mysql-test/suite/federated/federatedx.result26
-rw-r--r--mysql-test/suite/federated/federatedx.test29
-rw-r--r--mysql-test/suite/funcs_1/r/is_check_constraints.result5
-rw-r--r--mysql-test/suite/funcs_1/r/is_columns_is.result4
-rw-r--r--mysql-test/suite/funcs_1/r/is_columns_is_embedded.result4
-rw-r--r--mysql-test/suite/funcs_1/r/is_tables_is.result8
-rw-r--r--mysql-test/suite/funcs_1/r/is_tables_is_embedded.result8
-rw-r--r--mysql-test/suite/funcs_1/t/is_check_constraints.test4
-rw-r--r--mysql-test/suite/galera/disabled.def15
-rw-r--r--mysql-test/suite/galera/include/galera_st_clean_slave.inc106
-rw-r--r--mysql-test/suite/galera/include/galera_st_disconnect_slave.inc109
-rw-r--r--mysql-test/suite/galera/include/galera_st_kill_slave.inc106
-rw-r--r--mysql-test/suite/galera/include/galera_st_kill_slave_ddl.inc114
-rw-r--r--mysql-test/suite/galera/include/galera_st_shutdown_slave.inc105
-rw-r--r--mysql-test/suite/galera/include/galera_start_replication.inc (renamed from mysql-test/suite/galera/include/galera_load_provider.inc)2
-rw-r--r--mysql-test/suite/galera/include/galera_stop_replication.inc (renamed from mysql-test/suite/galera/include/galera_unload_provider.inc)3
-rw-r--r--mysql-test/suite/galera/r/MENT-1047.result4
-rw-r--r--mysql-test/suite/galera/r/galera_UK_conflict.result131
-rw-r--r--mysql-test/suite/galera/r/galera_as_slave_replay.result26
-rw-r--r--mysql-test/suite/galera/r/galera_bf_abort_ps.result16
-rw-r--r--mysql-test/suite/galera/r/galera_bf_abort_ps_threadpool.result22
-rw-r--r--mysql-test/suite/galera/r/galera_ctas.result88
-rw-r--r--mysql-test/suite/galera/r/galera_fk_cascade_delete.result26
-rw-r--r--mysql-test/suite/galera/r/galera_fk_cascade_delete_debug.result20
-rw-r--r--mysql-test/suite/galera/r/galera_fulltext.result28
-rw-r--r--mysql-test/suite/galera/r/galera_ist_mariabackup,debug.rdiff204
-rw-r--r--mysql-test/suite/galera/r/galera_ist_mariabackup.result555
-rw-r--r--mysql-test/suite/galera/r/galera_ist_mariabackup_innodb_flush_logs,debug.rdiff204
-rw-r--r--mysql-test/suite/galera/r/galera_ist_mariabackup_innodb_flush_logs.result184
-rw-r--r--mysql-test/suite/galera/r/galera_ist_mysqldump,debug.rdiff204
-rw-r--r--mysql-test/suite/galera/r/galera_ist_mysqldump.result368
-rw-r--r--mysql-test/suite/galera/r/galera_ist_restart_joiner.result3
-rw-r--r--mysql-test/suite/galera/r/galera_ist_rsync,debug.rdiff204
-rw-r--r--mysql-test/suite/galera/r/galera_ist_rsync.result555
-rw-r--r--mysql-test/suite/galera/r/galera_kill_nochanges.result2
-rw-r--r--mysql-test/suite/galera/r/galera_log_bin_opt.result80
-rw-r--r--mysql-test/suite/galera/r/galera_split_brain.result4
-rw-r--r--mysql-test/suite/galera/r/galera_ssl_upgrade.result10
-rw-r--r--mysql-test/suite/galera/r/galera_sst_mariabackup,debug.rdiff204
-rw-r--r--mysql-test/suite/galera/r/galera_sst_mariabackup.result552
-rw-r--r--mysql-test/suite/galera/r/galera_sst_mariabackup_data_dir,debug.rdiff204
-rw-r--r--mysql-test/suite/galera/r/galera_sst_mariabackup_data_dir.result552
-rw-r--r--mysql-test/suite/galera/r/galera_sst_mysqldump,debug.rdiff220
-rw-r--r--mysql-test/suite/galera/r/galera_sst_mysqldump,release.rdiff18
-rw-r--r--mysql-test/suite/galera/r/galera_sst_mysqldump.result744
-rw-r--r--mysql-test/suite/galera/r/galera_sst_mysqldump_with_key,debug.rdiff204
-rw-r--r--mysql-test/suite/galera/r/galera_sst_mysqldump_with_key.result368
-rw-r--r--mysql-test/suite/galera/r/galera_sst_rsync,debug.rdiff204
-rw-r--r--mysql-test/suite/galera/r/galera_sst_rsync.result552
-rw-r--r--mysql-test/suite/galera/r/galera_sst_rsync2,debug.rdiff210
-rw-r--r--mysql-test/suite/galera/r/galera_sst_rsync2.result552
-rw-r--r--mysql-test/suite/galera/r/galera_sst_rsync_data_dir,debug.rdiff204
-rw-r--r--mysql-test/suite/galera/r/galera_sst_rsync_data_dir.result552
-rw-r--r--mysql-test/suite/galera/r/galera_toi_lock_shared.result21
-rw-r--r--mysql-test/suite/galera/r/galera_truncate.result11
-rw-r--r--mysql-test/suite/galera/r/galera_var_sst_auth.result18
-rw-r--r--mysql-test/suite/galera/r/galera_var_wsrep_on_off.result103
-rw-r--r--mysql-test/suite/galera/r/galera_var_wsrep_provider_options.result10
-rw-r--r--mysql-test/suite/galera/r/galera_var_wsrep_start_position.result (renamed from mysql-test/suite/sys_vars/r/wsrep_start_position_basic.result)64
-rw-r--r--mysql-test/suite/galera/r/galera_virtual_blob.result21
-rw-r--r--mysql-test/suite/galera/r/galera_virtual_column.result19
-rw-r--r--mysql-test/suite/galera/r/lp1376747-4.result10
-rw-r--r--mysql-test/suite/galera/r/mysql-wsrep#33,debug.rdiff204
-rw-r--r--mysql-test/suite/galera/r/mysql-wsrep#33.result739
-rw-r--r--mysql-test/suite/galera/t/MDEV-16509.test2
-rw-r--r--mysql-test/suite/galera/t/MENT-1047.test7
-rw-r--r--mysql-test/suite/galera/t/galera#500.test5
-rw-r--r--mysql-test/suite/galera/t/galera_UK_conflict.test276
-rw-r--r--mysql-test/suite/galera/t/galera_as_slave_replay.test30
-rw-r--r--mysql-test/suite/galera/t/galera_bf_abort_ps.cnf3
-rw-r--r--mysql-test/suite/galera/t/galera_bf_abort_ps.test34
-rw-r--r--mysql-test/suite/galera/t/galera_bf_abort_ps_threadpool.cnf7
-rw-r--r--mysql-test/suite/galera/t/galera_bf_abort_ps_threadpool.test54
-rw-r--r--mysql-test/suite/galera/t/galera_bf_kill_debug.test2
-rw-r--r--mysql-test/suite/galera/t/galera_binlog_stmt_autoinc.cnf7
-rw-r--r--mysql-test/suite/galera/t/galera_ctas.test39
-rw-r--r--mysql-test/suite/galera/t/galera_fk_cascade_delete.test19
-rw-r--r--mysql-test/suite/galera/t/galera_fk_cascade_delete_debug.test27
-rw-r--r--mysql-test/suite/galera/t/galera_fulltext.test23
-rw-r--r--mysql-test/suite/galera/t/galera_gcache_recover.cnf2
-rw-r--r--mysql-test/suite/galera/t/galera_ist_restart_joiner.test4
-rw-r--r--mysql-test/suite/galera/t/galera_log_bin.inc46
-rw-r--r--mysql-test/suite/galera/t/galera_log_bin.test47
-rw-r--r--mysql-test/suite/galera/t/galera_log_bin_opt-master.opt1
-rw-r--r--mysql-test/suite/galera/t/galera_log_bin_opt.cnf15
-rw-r--r--mysql-test/suite/galera/t/galera_log_bin_opt.test2
-rw-r--r--mysql-test/suite/galera/t/galera_rsu_error.test3
-rw-r--r--mysql-test/suite/galera/t/galera_split_brain.test6
-rw-r--r--mysql-test/suite/galera/t/galera_ssl_upgrade.cnf4
-rw-r--r--mysql-test/suite/galera/t/galera_ssl_upgrade.test20
-rw-r--r--mysql-test/suite/galera/t/galera_sst_mariabackup_lost_found.test2
-rw-r--r--mysql-test/suite/galera/t/galera_toi_ddl_sequential.test3
-rw-r--r--mysql-test/suite/galera/t/galera_toi_lock_shared.test15
-rw-r--r--mysql-test/suite/galera/t/galera_truncate.test26
-rw-r--r--mysql-test/suite/galera/t/galera_truncate_temporary.test3
-rw-r--r--mysql-test/suite/galera/t/galera_var_sst_auth.cnf5
-rw-r--r--mysql-test/suite/galera/t/galera_var_sst_auth.test35
-rw-r--r--mysql-test/suite/galera/t/galera_var_wsrep_on_off.test135
-rw-r--r--mysql-test/suite/galera/t/galera_var_wsrep_provider_options.test11
-rw-r--r--mysql-test/suite/galera/t/galera_var_wsrep_start_position.test (renamed from mysql-test/suite/sys_vars/t/wsrep_start_position_basic.test)38
-rw-r--r--mysql-test/suite/galera/t/galera_virtual_blob.test10
-rw-r--r--mysql-test/suite/galera/t/galera_virtual_column.test42
-rw-r--r--mysql-test/suite/galera/t/lp1376747-4.test36
-rw-r--r--mysql-test/suite/galera_3nodes/disabled.def11
-rw-r--r--mysql-test/suite/galera_3nodes/r/galera_2_cluster.result89
-rw-r--r--mysql-test/suite/galera_3nodes/r/galera_ssl_reload.result15
-rw-r--r--mysql-test/suite/galera_3nodes/r/galera_wsrep_schema_init.result94
-rw-r--r--mysql-test/suite/galera_3nodes/t/galera_2_cluster.cnf25
-rw-r--r--mysql-test/suite/galera_3nodes/t/galera_2_cluster.test148
-rw-r--r--mysql-test/suite/galera_3nodes/t/galera_ipv6_mysqldump.test4
-rw-r--r--mysql-test/suite/galera_3nodes/t/galera_ist_gcache_rollover.test4
-rw-r--r--mysql-test/suite/galera_3nodes/t/galera_ssl_reload.cnf10
-rw-r--r--mysql-test/suite/galera_3nodes/t/galera_ssl_reload.test67
-rw-r--r--mysql-test/suite/galera_3nodes/t/galera_wsrep_schema_init.cnf5
-rw-r--r--mysql-test/suite/galera_3nodes/t/galera_wsrep_schema_init.test58
-rw-r--r--mysql-test/suite/galera_sr/disabled.def3
-rw-r--r--mysql-test/suite/galera_sr/r/MDEV-25226.result24
-rw-r--r--mysql-test/suite/galera_sr/r/galera-features#56.result10
-rw-r--r--mysql-test/suite/galera_sr/t/GCF-900.test3
-rw-r--r--mysql-test/suite/galera_sr/t/MDEV-25226.test33
-rw-r--r--mysql-test/suite/galera_sr/t/galera-features#56.test9
-rw-r--r--mysql-test/suite/gcol/inc/gcol_column_def_options.inc5
-rw-r--r--mysql-test/suite/gcol/r/gcol_column_def_options_innodb.result14
-rw-r--r--mysql-test/suite/gcol/r/gcol_column_def_options_myisam.result14
-rw-r--r--mysql-test/suite/gcol/r/innodb_virtual_fk.result33
-rw-r--r--mysql-test/suite/gcol/r/virtual_index_drop.result69
-rw-r--r--mysql-test/suite/gcol/t/innodb_virtual_fk.test41
-rw-r--r--mysql-test/suite/gcol/t/virtual_index_drop.test71
-rw-r--r--mysql-test/suite/innodb/include/innodb_simulate_comp_failures.inc152
-rw-r--r--mysql-test/suite/innodb/r/alter_large_dml.result10
-rw-r--r--mysql-test/suite/innodb/r/alter_mdl_timeout.result23
-rw-r--r--mysql-test/suite/innodb/r/alter_table.result24
-rw-r--r--mysql-test/suite/innodb/r/alter_varchar_change.result11
-rw-r--r--mysql-test/suite/innodb/r/default_row_format_alter.result11
-rw-r--r--mysql-test/suite/innodb/r/file_format_defaults.result2
-rw-r--r--mysql-test/suite/innodb/r/foreign_key.result11
-rw-r--r--mysql-test/suite/innodb/r/innodb-autoinc.result2
-rw-r--r--mysql-test/suite/innodb/r/innodb-virtual-columns2.result25
-rw-r--r--mysql-test/suite/innodb/r/innodb.result19
-rw-r--r--mysql-test/suite/innodb/r/innodb_buffer_pool_fail.result8
-rw-r--r--mysql-test/suite/innodb/r/innodb_multi_update.result1
-rw-r--r--mysql-test/suite/innodb/r/innodb_simulate_comp_failures.result17
-rw-r--r--mysql-test/suite/innodb/r/innodb_simulate_comp_failures_small.result17
-rw-r--r--mysql-test/suite/innodb/r/instant_alter.result40
-rw-r--r--mysql-test/suite/innodb/r/instant_alter_charset.result11
-rw-r--r--mysql-test/suite/innodb/r/instant_alter_debug.result162
-rw-r--r--mysql-test/suite/innodb/r/mvcc_secondary.result24
-rw-r--r--mysql-test/suite/innodb/r/temporary_table.result96
-rw-r--r--mysql-test/suite/innodb/r/truncate_foreign.result11
-rw-r--r--mysql-test/suite/innodb/t/alter_large_dml.test10
-rw-r--r--mysql-test/suite/innodb/t/alter_mdl_timeout.opt1
-rw-r--r--mysql-test/suite/innodb/t/alter_mdl_timeout.test32
-rw-r--r--mysql-test/suite/innodb/t/alter_table.test28
-rw-r--r--mysql-test/suite/innodb/t/alter_varchar_change.test12
-rw-r--r--mysql-test/suite/innodb/t/default_row_format_alter.test15
-rw-r--r--mysql-test/suite/innodb/t/foreign_key.test39
-rw-r--r--mysql-test/suite/innodb/t/innodb-virtual-columns2.test20
-rw-r--r--mysql-test/suite/innodb/t/innodb.test18
-rw-r--r--mysql-test/suite/innodb/t/innodb_buffer_pool_fail.test11
-rw-r--r--mysql-test/suite/innodb/t/innodb_bug60049-master.opt1
-rw-r--r--mysql-test/suite/innodb/t/innodb_bug60049.test49
-rw-r--r--mysql-test/suite/innodb/t/innodb_multi_update.test1
-rw-r--r--mysql-test/suite/innodb/t/innodb_simulate_comp_failures-master.opt2
-rw-r--r--mysql-test/suite/innodb/t/innodb_simulate_comp_failures.test9
-rw-r--r--mysql-test/suite/innodb/t/innodb_simulate_comp_failures_small-master.opt2
-rw-r--r--mysql-test/suite/innodb/t/innodb_simulate_comp_failures_small.test8
-rw-r--r--mysql-test/suite/innodb/t/instant_alter.test34
-rw-r--r--mysql-test/suite/innodb/t/instant_alter_charset.test14
-rw-r--r--mysql-test/suite/innodb/t/instant_alter_debug.combinations4
-rw-r--r--mysql-test/suite/innodb/t/instant_alter_debug.test187
-rw-r--r--mysql-test/suite/innodb/t/mvcc_secondary.test26
-rw-r--r--mysql-test/suite/innodb/t/temporary_table.test107
-rw-r--r--mysql-test/suite/innodb/t/truncate_foreign.test13
-rw-r--r--mysql-test/suite/innodb_fts/r/create.result10
-rw-r--r--mysql-test/suite/innodb_fts/r/fulltext.result30
-rw-r--r--mysql-test/suite/innodb_fts/r/innodb-fts-ddl.result33
-rw-r--r--mysql-test/suite/innodb_fts/r/misc_debug.result26
-rw-r--r--mysql-test/suite/innodb_fts/t/create.test11
-rw-r--r--mysql-test/suite/innodb_fts/t/fulltext.test25
-rw-r--r--mysql-test/suite/innodb_fts/t/innodb-fts-ddl.test43
-rw-r--r--mysql-test/suite/innodb_fts/t/misc_debug.test31
-rw-r--r--mysql-test/suite/innodb_gis/t/1.test12
-rw-r--r--mysql-test/suite/innodb_gis/t/bug16236208.test1
-rw-r--r--mysql-test/suite/innodb_gis/t/create_spatial_index.test1
-rw-r--r--mysql-test/suite/innodb_gis/t/gis.test15
-rw-r--r--mysql-test/suite/innodb_gis/t/precise.test1
-rw-r--r--mysql-test/suite/innodb_gis/t/rtree.test6
-rw-r--r--mysql-test/suite/innodb_gis/t/rtree_purge.test6
-rw-r--r--mysql-test/suite/innodb_zip/r/index_large_prefix.result6
-rw-r--r--mysql-test/suite/innodb_zip/r/prefix_index_liftedlimit.result2
-rw-r--r--mysql-test/suite/maria/icp.result12
-rw-r--r--mysql-test/suite/maria/maria-ucs2.result4
-rw-r--r--mysql-test/suite/maria/maria.result14
-rw-r--r--mysql-test/suite/maria/maria3.result2
-rw-r--r--mysql-test/suite/maria/mrr.result4
-rw-r--r--mysql-test/suite/mariabackup/error_during_copyback.result10
-rw-r--r--mysql-test/suite/mariabackup/error_during_copyback.test25
-rw-r--r--mysql-test/suite/mariabackup/innodb_force_recovery.result26
-rw-r--r--mysql-test/suite/mariabackup/innodb_force_recovery.test138
-rw-r--r--mysql-test/suite/mariabackup/log_page_corruption.result7
-rw-r--r--mysql-test/suite/mariabackup/log_page_corruption.test18
-rw-r--r--mysql-test/suite/optimizer_unfixed_bugs/r/bug42991.result2
-rw-r--r--mysql-test/suite/optimizer_unfixed_bugs/t/bug42991.test4
-rw-r--r--mysql-test/suite/perfschema/r/schema.result22
-rw-r--r--mysql-test/suite/perfschema/r/table_schema.result22
-rw-r--r--mysql-test/suite/perfschema/r/threads_mysql.result22
-rw-r--r--mysql-test/suite/plugins/r/server_audit.result26
-rw-r--r--mysql-test/suite/plugins/t/multiauth.test1
-rw-r--r--mysql-test/suite/plugins/t/server_audit.test17
-rw-r--r--mysql-test/suite/rpl/disabled.def3
-rw-r--r--mysql-test/suite/rpl/include/rpl_binlog_max_cache_size.test4
-rw-r--r--mysql-test/suite/rpl/include/rpl_semi_sync.inc72
-rw-r--r--mysql-test/suite/rpl/r/rpl_change_master.result4
-rw-r--r--mysql-test/suite/rpl/r/rpl_ignore_grant.result8
-rw-r--r--mysql-test/suite/rpl/r/rpl_master_pos_wait.result1
-rw-r--r--mysql-test/suite/rpl/r/rpl_relay_max_extension.result37
-rw-r--r--mysql-test/suite/rpl/r/rpl_row_USER.result4
-rw-r--r--mysql-test/suite/rpl/r/rpl_row_utf32.result4
-rw-r--r--mysql-test/suite/rpl/r/rpl_row_vcol_crash.result380
-rw-r--r--mysql-test/suite/rpl/r/rpl_semi_sync.result19
-rw-r--r--mysql-test/suite/rpl/r/rpl_semi_sync_after_sync.result19
-rw-r--r--mysql-test/suite/rpl/r/rpl_semi_sync_after_sync_row.result19
-rw-r--r--mysql-test/suite/rpl/r/rpl_semi_sync_slave_compressed_protocol.result19
-rw-r--r--mysql-test/suite/rpl/r/rpl_sp.result18
-rw-r--r--mysql-test/suite/rpl/r/rpl_spec_variables.result60
-rw-r--r--mysql-test/suite/rpl/r/rpl_table_options.result9
-rw-r--r--mysql-test/suite/rpl/t/rpl_change_master.test7
-rw-r--r--mysql-test/suite/rpl/t/rpl_ignore_grant.test24
-rw-r--r--mysql-test/suite/rpl/t/rpl_ignore_table_update.test8
-rw-r--r--mysql-test/suite/rpl/t/rpl_master_pos_wait.test1
-rw-r--r--mysql-test/suite/rpl/t/rpl_parallel_retry.test3
-rw-r--r--mysql-test/suite/rpl/t/rpl_relay_max_extension.test109
-rw-r--r--mysql-test/suite/rpl/t/rpl_row_USER.test12
-rw-r--r--mysql-test/suite/rpl/t/rpl_row_create_table.test5
-rw-r--r--mysql-test/suite/rpl/t/rpl_row_flsh_tbls.test6
-rw-r--r--mysql-test/suite/rpl/t/rpl_row_tbl_metadata.test2
-rw-r--r--mysql-test/suite/rpl/t/rpl_row_trig002.test2
-rw-r--r--mysql-test/suite/rpl/t/rpl_row_vcol_crash.test425
-rw-r--r--mysql-test/suite/rpl/t/rpl_semi_sync_slave_compressed_protocol-slave.opt1
-rw-r--r--mysql-test/suite/rpl/t/rpl_semi_sync_slave_compressed_protocol.test55
-rw-r--r--mysql-test/suite/rpl/t/rpl_semisync_ali_issues.test15
-rw-r--r--mysql-test/suite/rpl/t/rpl_sp.test5
-rw-r--r--mysql-test/suite/rpl/t/rpl_spec_variables-slave.opt1
-rw-r--r--mysql-test/suite/rpl/t/rpl_spec_variables.test8
-rw-r--r--mysql-test/suite/rpl/t/rpl_table_options.test1
-rw-r--r--mysql-test/suite/sql_sequence/concurrent_create.result13
-rw-r--r--mysql-test/suite/sql_sequence/concurrent_create.test19
-rw-r--r--mysql-test/suite/sql_sequence/create.result2
-rw-r--r--mysql-test/suite/sql_sequence/create.test2
-rw-r--r--mysql-test/suite/sql_sequence/mysqldump.result42
-rw-r--r--mysql-test/suite/sql_sequence/mysqldump.test11
-rw-r--r--mysql-test/suite/sql_sequence/other.result59
-rw-r--r--mysql-test/suite/sql_sequence/other.test64
-rw-r--r--mysql-test/suite/sys_vars/inc/sysvars_server.inc2
-rw-r--r--mysql-test/suite/sys_vars/r/innodb_checksum_algorithm_basic.result8
-rw-r--r--mysql-test/suite/sys_vars/r/innodb_idle_flush_pct_basic.result12
-rw-r--r--mysql-test/suite/sys_vars/r/innodb_simulate_comp_failures_basic.result77
-rw-r--r--mysql-test/suite/sys_vars/r/max_sort_length_basic.result199
-rw-r--r--mysql-test/suite/sys_vars/r/max_sort_length_func.result442
-rw-r--r--mysql-test/suite/sys_vars/r/sysvars_innodb,32bit.rdiff2
-rw-r--r--mysql-test/suite/sys_vars/r/sysvars_innodb.result16
-rw-r--r--mysql-test/suite/sys_vars/r/sysvars_server_embedded.result8
-rw-r--r--mysql-test/suite/sys_vars/r/sysvars_server_notembedded.result8
-rw-r--r--mysql-test/suite/sys_vars/r/sysvars_wsrep.result4
-rw-r--r--mysql-test/suite/sys_vars/r/wsrep_debug_basic.result8
-rw-r--r--mysql-test/suite/sys_vars/r/wsrep_notify_cmd_basic.result47
-rw-r--r--mysql-test/suite/sys_vars/r/wsrep_on_without_provider.result5
-rw-r--r--mysql-test/suite/sys_vars/r/wsrep_provider_basic.result40
-rw-r--r--mysql-test/suite/sys_vars/r/wsrep_provider_options_basic.result46
-rw-r--r--mysql-test/suite/sys_vars/t/innodb_simulate_comp_failures_basic.test65
-rw-r--r--mysql-test/suite/sys_vars/t/max_sort_length_basic.test225
-rw-r--r--mysql-test/suite/sys_vars/t/max_sort_length_func.test157
-rw-r--r--mysql-test/suite/sys_vars/t/thread_stack_basic.test2
-rw-r--r--mysql-test/suite/sys_vars/t/wsrep_notify_cmd_basic.test43
-rw-r--r--mysql-test/suite/sys_vars/t/wsrep_on_without_provider.test9
-rw-r--r--mysql-test/suite/sys_vars/t/wsrep_provider_basic.test39
-rw-r--r--mysql-test/suite/sys_vars/t/wsrep_provider_options_basic.test41
-rw-r--r--mysql-test/suite/unit/suite.pm11
-rw-r--r--mysql-test/suite/vcol/r/vcol_syntax.result38
-rw-r--r--mysql-test/suite/vcol/t/vcol_syntax.test44
-rw-r--r--mysql-test/suite/versioning/common.inc21
-rw-r--r--mysql-test/suite/versioning/common_finish.inc2
-rw-r--r--mysql-test/suite/versioning/r/alter.result68
-rw-r--r--mysql-test/suite/versioning/r/autoinc.result (renamed from mysql-test/suite/versioning/r/auto_increment.result)10
-rw-r--r--mysql-test/suite/versioning/r/delete.result19
-rw-r--r--mysql-test/suite/versioning/r/foreign.result16
-rw-r--r--mysql-test/suite/versioning/r/partition.result43
-rw-r--r--mysql-test/suite/versioning/r/replace.result13
-rw-r--r--mysql-test/suite/versioning/r/trx_id.result8
-rw-r--r--mysql-test/suite/versioning/r/update.result65
-rw-r--r--mysql-test/suite/versioning/t/alter.test62
-rw-r--r--mysql-test/suite/versioning/t/autoinc.test (renamed from mysql-test/suite/versioning/t/auto_increment.test)13
-rw-r--r--mysql-test/suite/versioning/t/delete.test15
-rw-r--r--mysql-test/suite/versioning/t/foreign.test18
-rw-r--r--mysql-test/suite/versioning/t/partition.test54
-rw-r--r--mysql-test/suite/versioning/t/replace.test18
-rw-r--r--mysql-test/suite/versioning/t/trx_id.test6
-rw-r--r--mysql-test/suite/versioning/t/update.test64
-rw-r--r--mysql-test/suite/wsrep/disabled.def5
-rw-r--r--mysql-test/suite/wsrep/r/variables.result375
-rw-r--r--mysql-test/suite/wsrep/r/variables_debug.result254
-rw-r--r--mysql-test/suite/wsrep/r/wsrep_on_basic.result (renamed from mysql-test/suite/sys_vars/r/wsrep_on_basic.result)4
-rw-r--r--mysql-test/suite/wsrep/r/wsrep_variables_no_provider.result44
-rw-r--r--mysql-test/suite/wsrep/r/wsrep_variables_wsrep_off.result39
-rw-r--r--mysql-test/suite/wsrep/t/variables.test157
-rw-r--r--mysql-test/suite/wsrep/t/variables_debug.test153
-rw-r--r--mysql-test/suite/wsrep/t/wsrep_on_basic.opt1
-rw-r--r--mysql-test/suite/wsrep/t/wsrep_on_basic.test (renamed from mysql-test/suite/sys_vars/t/wsrep_on_basic.test)2
-rw-r--r--mysql-test/suite/wsrep/t/wsrep_variables_no_provider.cnf12
-rw-r--r--mysql-test/suite/wsrep/t/wsrep_variables_no_provider.test38
-rw-r--r--mysql-test/suite/wsrep/t/wsrep_variables_wsrep_off.cnf12
-rw-r--r--mysql-test/suite/wsrep/t/wsrep_variables_wsrep_off.test30
-rw-r--r--mysql-test/unstable-tests642
-rw-r--r--mysys/file_logger.c55
-rw-r--r--mysys/lf_hash.c28
-rw-r--r--mysys/ma_dyncol.c6
-rw-r--r--mysys/mf_iocache.c2
-rw-r--r--mysys/my_addr_resolve.c2
-rw-r--r--mysys/my_seek.c2
-rw-r--r--plugin/auth_ed25519/CMakeLists.txt4
-rw-r--r--plugin/auth_gssapi/CMakeLists.txt2
-rw-r--r--plugin/auth_pam/CMakeLists.txt7
-rw-r--r--plugin/auth_pam/testing/CMakeLists.txt2
-rw-r--r--plugin/feedback/sender_thread.cc3
-rw-r--r--plugin/server_audit/server_audit.c141
-rw-r--r--plugin/userstat/index_stats.cc2
-rw-r--r--plugin/userstat/table_stats.cc3
-rw-r--r--scripts/CMakeLists.txt1
-rw-r--r--scripts/mysql_system_tables.sql2
-rw-r--r--scripts/mysql_system_tables_fix.sql27
-rw-r--r--scripts/mysql_to_mariadb.sql22
-rw-r--r--scripts/mysqld_multi.sh15
-rw-r--r--scripts/mysqld_safe.sh10
-rw-r--r--scripts/wsrep_sst_common.sh3
-rw-r--r--scripts/wsrep_sst_mariabackup.sh10
-rw-r--r--sql/compat56.h4
-rw-r--r--sql/contributors.h13
-rw-r--r--sql/create_options.cc5
-rw-r--r--sql/event_data_objects.cc1
-rw-r--r--sql/event_data_objects.h2
-rw-r--r--sql/event_queue.cc1
-rw-r--r--sql/events.cc12
-rw-r--r--sql/field.cc29
-rw-r--r--sql/field.h51
-rw-r--r--sql/filesort.cc14
-rw-r--r--sql/gcalc_tools.h6
-rw-r--r--sql/gen_win_tzname_data.ps11
-rw-r--r--sql/ha_partition.cc15
-rw-r--r--sql/handler.cc34
-rw-r--r--sql/handler.h16
-rw-r--r--sql/item.cc167
-rw-r--r--sql/item.h85
-rw-r--r--sql/item_buff.cc4
-rw-r--r--sql/item_cmpfunc.cc45
-rw-r--r--sql/item_cmpfunc.h13
-rw-r--r--sql/item_create.cc38
-rw-r--r--sql/item_func.cc74
-rw-r--r--sql/item_geofunc.cc146
-rw-r--r--sql/item_geofunc.h16
-rw-r--r--sql/item_jsonfunc.cc5
-rw-r--r--sql/item_strfunc.cc16
-rw-r--r--sql/item_strfunc.h9
-rw-r--r--sql/item_subselect.cc107
-rw-r--r--sql/item_subselect.h12
-rw-r--r--sql/item_sum.cc20
-rw-r--r--sql/item_timefunc.cc23
-rw-r--r--sql/key.cc9
-rw-r--r--sql/lock.cc17
-rw-r--r--sql/log.cc83
-rw-r--r--sql/log_event.cc72
-rw-r--r--sql/mdl.cc5
-rw-r--r--sql/mdl.h4
-rw-r--r--sql/mf_iocache.cc2
-rw-r--r--sql/mysqld.cc96
-rw-r--r--sql/mysqld.h4
-rw-r--r--sql/opt_range.cc115
-rw-r--r--sql/opt_split.cc52
-rw-r--r--sql/opt_subselect.cc12
-rw-r--r--sql/opt_sum.cc3
-rw-r--r--sql/partition_element.h3
-rw-r--r--sql/partition_info.cc4
-rw-r--r--sql/protocol.cc6
-rw-r--r--sql/rpl_parallel.cc5
-rw-r--r--sql/rpl_rli.cc4
-rw-r--r--sql/semisync_master_ack_receiver.cc5
-rw-r--r--sql/service_wsrep.cc44
-rw-r--r--sql/share/errmsg-utf8.txt31
-rw-r--r--sql/signal_handler.cc17
-rw-r--r--sql/slave.cc287
-rw-r--r--sql/sp.cc2
-rw-r--r--sql/spatial.cc199
-rw-r--r--sql/spatial.h22
-rw-r--r--sql/sql_acl.cc173
-rw-r--r--sql/sql_admin.cc17
-rw-r--r--sql/sql_alter.cc2
-rw-r--r--sql/sql_base.cc177
-rw-r--r--sql/sql_cache.cc4
-rw-r--r--sql/sql_cache.h2
-rw-r--r--sql/sql_class.cc74
-rw-r--r--sql/sql_class.h77
-rw-r--r--sql/sql_connect.cc2
-rw-r--r--sql/sql_cte.cc28
-rw-r--r--sql/sql_delete.cc29
-rw-r--r--sql/sql_derived.cc51
-rw-r--r--sql/sql_handler.cc5
-rw-r--r--sql/sql_help.cc3
-rw-r--r--sql/sql_insert.cc33
-rw-r--r--sql/sql_join_cache.cc90
-rw-r--r--sql/sql_join_cache.h11
-rw-r--r--sql/sql_lex.cc97
-rw-r--r--sql/sql_lex.h48
-rw-r--r--sql/sql_manager.cc87
-rw-r--r--sql/sql_manager.h2
-rw-r--r--sql/sql_parse.cc408
-rw-r--r--sql/sql_parse.h1
-rw-r--r--sql/sql_partition.cc6
-rw-r--r--sql/sql_plugin.h8
-rw-r--r--sql/sql_plugin_services.ic6
-rw-r--r--sql/sql_prepare.cc43
-rw-r--r--sql/sql_reload.cc8
-rw-r--r--sql/sql_repl.cc21
-rw-r--r--sql/sql_repl.h1
-rw-r--r--sql/sql_select.cc284
-rw-r--r--sql/sql_select.h115
-rw-r--r--sql/sql_sequence.cc17
-rw-r--r--sql/sql_sequence.h4
-rw-r--r--sql/sql_show.cc160
-rw-r--r--sql/sql_show.h1
-rw-r--r--sql/sql_statistics.cc108
-rw-r--r--sql/sql_statistics.h11
-rw-r--r--sql/sql_table.cc279
-rw-r--r--sql/sql_table.h2
-rw-r--r--sql/sql_test.cc16
-rw-r--r--sql/sql_truncate.cc32
-rw-r--r--sql/sql_tvc.cc111
-rw-r--r--sql/sql_type.cc151
-rw-r--r--sql/sql_type.h143
-rw-r--r--sql/sql_union.cc68
-rw-r--r--sql/sql_update.cc167
-rw-r--r--sql/sql_view.cc2
-rw-r--r--sql/sql_yacc.yy80
-rw-r--r--sql/sql_yacc_ora.yy94
-rw-r--r--sql/strfunc.cc16
-rw-r--r--sql/structs.h2
-rw-r--r--sql/sys_vars.cc33
-rw-r--r--sql/table.cc13
-rw-r--r--sql/table.h52
-rw-r--r--sql/temporary_tables.cc16
-rw-r--r--sql/unireg.cc11
-rw-r--r--sql/upgrade_conf_file.cc1
-rw-r--r--sql/win_tzname_data.h1
-rw-r--r--sql/wsrep_check_opts.cc2
-rw-r--r--sql/wsrep_client_service.cc9
-rw-r--r--sql/wsrep_condition_variable.h2
-rw-r--r--sql/wsrep_dummy.cc16
-rw-r--r--sql/wsrep_high_priority_service.cc11
-rw-r--r--sql/wsrep_mysqld.cc150
-rw-r--r--sql/wsrep_mysqld.h48
-rw-r--r--sql/wsrep_notify.cc12
-rw-r--r--sql/wsrep_priv.h2
-rw-r--r--sql/wsrep_schema.cc94
-rw-r--r--sql/wsrep_server_service.cc20
-rw-r--r--sql/wsrep_sst.cc125
-rw-r--r--sql/wsrep_thd.cc44
-rw-r--r--sql/wsrep_trans_observer.h24
-rw-r--r--sql/wsrep_var.cc233
-rw-r--r--sql/wsrep_var.h1
-rw-r--r--storage/archive/ha_archive.cc19
-rw-r--r--storage/archive/ha_archive.h14
-rw-r--r--storage/cassandra/ha_cassandra.cc42
-rw-r--r--storage/connect/CMakeLists.txt55
-rw-r--r--storage/connect/block.h42
-rw-r--r--storage/connect/bson.cpp1788
-rw-r--r--storage/connect/bson.h207
-rw-r--r--storage/connect/bsonudf.cpp6245
-rw-r--r--storage/connect/bsonudf.h411
-rw-r--r--storage/connect/cmgfam.cpp49
-rw-r--r--storage/connect/cmgfam.h7
-rw-r--r--storage/connect/colblk.cpp4
-rw-r--r--storage/connect/colblk.h2
-rw-r--r--storage/connect/connect.cc9
-rw-r--r--storage/connect/filamap.cpp15
-rw-r--r--storage/connect/filamtxt.cpp464
-rw-r--r--storage/connect/filamtxt.h45
-rw-r--r--storage/connect/filamvct.cpp27
-rw-r--r--storage/connect/filamzip.cpp4
-rw-r--r--storage/connect/global.h27
-rw-r--r--storage/connect/ha_connect.cc186
-rw-r--r--storage/connect/jdbconn.cpp1
-rw-r--r--storage/connect/jmgfam.cpp41
-rw-r--r--storage/connect/jmgfam.h7
-rw-r--r--storage/connect/jmgoconn.cpp4
-rw-r--r--storage/connect/json.cpp1903
-rw-r--r--storage/connect/json.h474
-rw-r--r--storage/connect/jsonudf.cpp509
-rw-r--r--storage/connect/jsonudf.h98
-rw-r--r--storage/connect/libdoc.cpp2
-rw-r--r--storage/connect/mycat.cc52
-rw-r--r--storage/connect/mysql-test/connect/disabled.def5
-rw-r--r--storage/connect/mysql-test/connect/r/alter_xml.result4
-rw-r--r--storage/connect/mysql-test/connect/r/alter_xml2.result4
-rw-r--r--storage/connect/mysql-test/connect/r/bson.result517
-rw-r--r--storage/connect/mysql-test/connect/r/bson_java_2.result385
-rw-r--r--storage/connect/mysql-test/connect/r/bson_java_3.result385
-rw-r--r--storage/connect/mysql-test/connect/r/bson_mongo_c.result385
-rw-r--r--storage/connect/mysql-test/connect/r/bson_udf.result685
-rw-r--r--storage/connect/mysql-test/connect/r/jdbc_oracle.result8
-rw-r--r--storage/connect/mysql-test/connect/r/json.result148
-rw-r--r--storage/connect/mysql-test/connect/r/json_java_2.result71
-rw-r--r--storage/connect/mysql-test/connect/r/json_java_3.result71
-rw-r--r--storage/connect/mysql-test/connect/r/json_mongo_c.result71
-rw-r--r--storage/connect/mysql-test/connect/r/json_udf.result6
-rw-r--r--storage/connect/mysql-test/connect/r/json_udf_bin.result2
-rw-r--r--storage/connect/mysql-test/connect/r/mongo_c.result35
-rw-r--r--storage/connect/mysql-test/connect/r/mongo_java_2.result35
-rw-r--r--storage/connect/mysql-test/connect/r/mongo_java_3.result35
-rw-r--r--storage/connect/mysql-test/connect/r/odbc_oracle.result38
-rw-r--r--storage/connect/mysql-test/connect/r/rest.result19
-rw-r--r--storage/connect/mysql-test/connect/r/xml.result3
-rw-r--r--storage/connect/mysql-test/connect/r/xml2.result38
-rw-r--r--storage/connect/mysql-test/connect/r/xml2_html.result6
-rw-r--r--storage/connect/mysql-test/connect/r/xml2_mult.result4
-rw-r--r--storage/connect/mysql-test/connect/r/xml2_zip.result24
-rw-r--r--storage/connect/mysql-test/connect/r/xml_html.result6
-rw-r--r--storage/connect/mysql-test/connect/r/xml_mult.result4
-rw-r--r--storage/connect/mysql-test/connect/r/xml_zip.result24
-rw-r--r--storage/connect/mysql-test/connect/r/zip.result42
-rw-r--r--storage/connect/mysql-test/connect/t/alter_xml.test2
-rw-r--r--storage/connect/mysql-test/connect/t/alter_xml2.test2
-rw-r--r--storage/connect/mysql-test/connect/t/bson.test294
-rw-r--r--storage/connect/mysql-test/connect/t/bson_java_2.test14
-rw-r--r--storage/connect/mysql-test/connect/t/bson_java_3.test14
-rw-r--r--storage/connect/mysql-test/connect/t/bson_mongo_c.test10
-rw-r--r--storage/connect/mysql-test/connect/t/bson_udf.inc72
-rw-r--r--storage/connect/mysql-test/connect/t/bson_udf.test282
-rw-r--r--storage/connect/mysql-test/connect/t/bson_udf2.inc63
-rw-r--r--storage/connect/mysql-test/connect/t/ini_grant.result89
-rw-r--r--storage/connect/mysql-test/connect/t/jdbc_oracle.test8
-rw-r--r--storage/connect/mysql-test/connect/t/json.test146
-rw-r--r--storage/connect/mysql-test/connect/t/json_java_2.test1
-rw-r--r--storage/connect/mysql-test/connect/t/json_java_3.test1
-rw-r--r--storage/connect/mysql-test/connect/t/mongo_test.inc37
-rw-r--r--storage/connect/mysql-test/connect/t/odbc_oracle.test30
-rw-r--r--storage/connect/mysql-test/connect/t/rest.inc17
-rw-r--r--storage/connect/mysql-test/connect/t/rest.test17
-rw-r--r--storage/connect/mysql-test/connect/t/xml.test1
-rw-r--r--storage/connect/mysql-test/connect/t/xml2.test46
-rw-r--r--storage/connect/mysql-test/connect/t/xml2_html.test6
-rw-r--r--storage/connect/mysql-test/connect/t/xml2_mult.test4
-rw-r--r--storage/connect/mysql-test/connect/t/xml2_zip.test24
-rw-r--r--storage/connect/mysql-test/connect/t/xml_html.test6
-rw-r--r--storage/connect/mysql-test/connect/t/xml_mult.test4
-rw-r--r--storage/connect/mysql-test/connect/t/xml_zip.test24
-rw-r--r--storage/connect/mysql-test/connect/t/zip.test30
-rw-r--r--storage/connect/myutil.cpp3
-rw-r--r--storage/connect/plgdbsem.h5
-rw-r--r--storage/connect/plugutil.cpp78
-rw-r--r--storage/connect/tabbson.cpp2562
-rw-r--r--storage/connect/tabbson.h339
-rw-r--r--storage/connect/tabdos.cpp7
-rw-r--r--storage/connect/tabfmt.cpp29
-rw-r--r--storage/connect/tabjson.cpp651
-rw-r--r--storage/connect/tabjson.h30
-rw-r--r--storage/connect/tabrest.cpp117
-rw-r--r--storage/connect/tabrest.h5
-rw-r--r--storage/connect/tabutil.cpp2
-rw-r--r--storage/connect/tabvir.cpp19
-rw-r--r--storage/connect/user_connect.cc3
-rw-r--r--storage/connect/value.cpp6
-rw-r--r--storage/connect/value.h12
-rw-r--r--storage/connect/xobject.h1
-rw-r--r--storage/csv/ha_tina.cc9
-rw-r--r--storage/federated/ha_federated.cc19
-rw-r--r--storage/federatedx/federatedx_io_mysql.cc16
-rw-r--r--storage/federatedx/federatedx_io_null.cc10
-rw-r--r--storage/federatedx/ha_federatedx.cc27
-rw-r--r--storage/federatedx/ha_federatedx.h7
-rw-r--r--storage/heap/ha_heap.cc3
-rw-r--r--storage/innobase/.clang-format-old (renamed from storage/innobase/.clang-format)0
-rw-r--r--storage/innobase/btr/btr0btr.cc80
-rw-r--r--storage/innobase/btr/btr0bulk.cc28
-rw-r--r--storage/innobase/btr/btr0cur.cc86
-rw-r--r--storage/innobase/btr/btr0defragment.cc9
-rw-r--r--storage/innobase/btr/btr0pcur.cc56
-rw-r--r--storage/innobase/btr/btr0sea.cc102
-rw-r--r--storage/innobase/buf/buf0buf.cc7
-rw-r--r--storage/innobase/buf/buf0flu.cc12
-rw-r--r--storage/innobase/data/data0data.cc4
-rw-r--r--storage/innobase/dict/dict0dict.cc13
-rw-r--r--storage/innobase/dict/dict0mem.cc7
-rw-r--r--storage/innobase/dict/dict0stats.cc40
-rw-r--r--storage/innobase/dict/dict0stats_bg.cc19
-rw-r--r--storage/innobase/fil/fil0fil.cc74
-rw-r--r--storage/innobase/fts/fts0fts.cc27
-rw-r--r--storage/innobase/gis/gis0rtree.cc68
-rw-r--r--storage/innobase/gis/gis0sea.cc67
-rw-r--r--storage/innobase/handler/ha_innodb.cc864
-rw-r--r--storage/innobase/handler/ha_innodb.h7
-rw-r--r--storage/innobase/handler/handler0alter.cc250
-rw-r--r--storage/innobase/ibuf/ibuf0ibuf.cc23
-rw-r--r--storage/innobase/include/btr0bulk.h2
-rw-r--r--storage/innobase/include/btr0pcur.h6
-rw-r--r--storage/innobase/include/data0data.h11
-rw-r--r--storage/innobase/include/dict0dict.h2
-rw-r--r--storage/innobase/include/dict0mem.h76
-rw-r--r--storage/innobase/include/fil0fil.h2
-rw-r--r--storage/innobase/include/gis0rtree.ic5
-rw-r--r--storage/innobase/include/ha_prototypes.h2
-rw-r--r--storage/innobase/include/os0file.h3
-rw-r--r--storage/innobase/include/page0cur.ic5
-rw-r--r--storage/innobase/include/page0page.ic2
-rw-r--r--storage/innobase/include/que0que.h3
-rw-r--r--storage/innobase/include/rem0rec.h16
-rw-r--r--storage/innobase/include/row0ins.h1
-rw-r--r--storage/innobase/include/row0log.h5
-rw-r--r--storage/innobase/include/row0merge.h20
-rw-r--r--storage/innobase/include/row0mysql.h8
-rw-r--r--storage/innobase/include/srv0srv.h7
-rw-r--r--storage/innobase/include/trx0sys.h7
-rw-r--r--storage/innobase/include/trx0trx.h5
-rw-r--r--storage/innobase/include/univ.i2
-rw-r--r--storage/innobase/include/ut0ut.h2
-rw-r--r--storage/innobase/lock/lock0lock.cc271
-rw-r--r--storage/innobase/log/log0recv.cc9
-rw-r--r--storage/innobase/os/os0file.cc32
-rw-r--r--storage/innobase/page/page0cur.cc61
-rw-r--r--storage/innobase/page/page0page.cc33
-rw-r--r--storage/innobase/page/page0zip.cc59
-rw-r--r--storage/innobase/pars/pars0pars.cc1
-rw-r--r--storage/innobase/que/que0que.cc3
-rw-r--r--storage/innobase/rem/rem0rec.cc189
-rw-r--r--storage/innobase/row/row0ftsort.cc6
-rw-r--r--storage/innobase/row/row0import.cc94
-rw-r--r--storage/innobase/row/row0ins.cc82
-rw-r--r--storage/innobase/row/row0log.cc39
-rw-r--r--storage/innobase/row/row0merge.cc50
-rw-r--r--storage/innobase/row/row0mysql.cc88
-rw-r--r--storage/innobase/row/row0purge.cc14
-rw-r--r--storage/innobase/row/row0row.cc14
-rw-r--r--storage/innobase/row/row0sel.cc177
-rw-r--r--storage/innobase/row/row0uins.cc8
-rw-r--r--storage/innobase/row/row0umod.cc19
-rw-r--r--storage/innobase/row/row0undo.cc7
-rw-r--r--storage/innobase/row/row0upd.cc186
-rw-r--r--storage/innobase/row/row0vers.cc40
-rw-r--r--storage/innobase/srv/srv0srv.cc8
-rw-r--r--storage/innobase/srv/srv0start.cc17
-rw-r--r--storage/innobase/trx/trx0i_s.cc14
-rw-r--r--storage/innobase/trx/trx0rec.cc5
-rw-r--r--storage/innobase/trx/trx0rseg.cc47
-rw-r--r--storage/innobase/trx/trx0sys.cc2
-rw-r--r--storage/innobase/trx/trx0trx.cc59
-rw-r--r--storage/innobase/ut/ut0ut.cc50
-rw-r--r--storage/maria/ha_maria.cc3
-rw-r--r--storage/maria/ma_bitmap.c5
-rw-r--r--storage/maria/ma_blockrec.c24
-rw-r--r--storage/maria/ma_check.c11
-rw-r--r--storage/maria/ma_create.c18
-rw-r--r--storage/maria/ma_crypt.c4
-rw-r--r--storage/maria/ma_dynrec.c13
-rw-r--r--storage/maria/ma_extra.c5
-rw-r--r--storage/maria/ma_open.c43
-rw-r--r--storage/maria/ma_packrec.c23
-rw-r--r--storage/maria/ma_recovery.c1
-rw-r--r--storage/maria/maria_def.h2
-rw-r--r--storage/mroonga/ha_mroonga.cpp31
-rw-r--r--storage/mroonga/lib/mrn_debug_column_access.cpp2
-rw-r--r--storage/mroonga/lib/mrn_debug_column_access.hpp6
-rw-r--r--storage/mroonga/lib/mrn_multiple_column_key_codec.cpp3
-rw-r--r--storage/mroonga/vendor/groonga/lib/alloc.c14
-rw-r--r--storage/mroonga/vendor/groonga/lib/db.c2
-rw-r--r--storage/mroonga/vendor/groonga/lib/pat.c4
-rw-r--r--storage/mroonga/vendor/groonga/lib/proc/proc_select.c3
-rw-r--r--storage/mroonga/vendor/groonga/lib/str.c5
-rw-r--r--storage/myisam/myisampack.c15
-rw-r--r--storage/oqgraph/ha_oqgraph.cc10
-rw-r--r--storage/perfschema/pfs_engine_table.cc27
-rw-r--r--storage/perfschema/pfs_instr.cc20
-rw-r--r--storage/perfschema/pfs_instr.h2
-rw-r--r--storage/perfschema/table_accounts.cc4
-rw-r--r--storage/perfschema/table_esgs_by_account_by_event_name.cc4
-rw-r--r--storage/perfschema/table_esgs_by_host_by_event_name.cc2
-rw-r--r--storage/perfschema/table_esgs_by_user_by_event_name.cc2
-rw-r--r--storage/perfschema/table_esms_by_account_by_event_name.cc4
-rw-r--r--storage/perfschema/table_esms_by_host_by_event_name.cc2
-rw-r--r--storage/perfschema/table_esms_by_user_by_event_name.cc2
-rw-r--r--storage/perfschema/table_ews_by_account_by_event_name.cc4
-rw-r--r--storage/perfschema/table_ews_by_host_by_event_name.cc2
-rw-r--r--storage/perfschema/table_ews_by_user_by_event_name.cc2
-rw-r--r--storage/perfschema/table_hosts.cc2
-rw-r--r--storage/perfschema/table_setup_actors.cc6
-rw-r--r--storage/perfschema/table_threads.cc4
-rw-r--r--storage/perfschema/table_users.cc2
-rw-r--r--storage/rocksdb/ha_rocksdb.cc11
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/issue896.result2
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result6
-rw-r--r--storage/rocksdb/rdb_datadic.cc6
-rw-r--r--storage/sequence/sequence.cc4
-rw-r--r--storage/sphinx/ha_sphinx.cc4
-rw-r--r--storage/spider/ha_spider.cc12
-rw-r--r--storage/spider/mysql-test/spider/r/basic_sql.result4
-rw-r--r--storage/spider/mysql-test/spider/t/basic_sql.test6
-rw-r--r--storage/spider/spd_db_conn.cc60
-rw-r--r--storage/spider/spd_db_mysql.cc17
-rw-r--r--storage/tokudb/ha_tokudb.cc16
-rw-r--r--storage/tokudb/mysql-test/tokudb/r/type_decimal.result5
-rw-r--r--strings/ctype-simple.c5
-rw-r--r--strings/ctype-uca.c7
-rw-r--r--strings/ctype-ucs2.c10
-rw-r--r--strings/decimal.c63
-rw-r--r--strings/json_lib.c1
-rw-r--r--support-files/mariadb.pc.in5
-rw-r--r--support-files/mariadb.service.in2
-rw-r--r--support-files/mariadb@.service.in2
-rw-r--r--support-files/rpm/server-postin.sh6
-rw-r--r--support-files/rpm/server-posttrans.sh11
-rwxr-xr-xtests/grant.pl750
-rw-r--r--unittest/mysys/stacktrace-t.c2
-rw-r--r--vio/viossl.c74
-rw-r--r--win/packaging/heidisql.cmake2
m---------wsrep-lib0
-rw-r--r--zlib/CMakeLists.txt39
1175 files changed, 51711 insertions, 21789 deletions
diff --git a/BUILD/SETUP.sh b/BUILD/SETUP.sh
index 44e74441de9..c3c7acecf96 100755
--- a/BUILD/SETUP.sh
+++ b/BUILD/SETUP.sh
@@ -141,7 +141,7 @@ elif [ "x$warning_mode" = "xmaintainer" ]; then
debug_extra_cflags="-g3"
else
# Both C and C++ warnings
- warnings="-Wall -Wextra -Wunused -Wwrite-strings -Wno-uninitialized -Wno-strict-aliasing -Wimplicit-fallthrough=2"
+ warnings="-Wall -Wextra -Wunused -Wwrite-strings -Wno-uninitialized -Wno-strict-aliasing -Wimplicit-fallthrough=2 -Wformat-security -Wvla"
# For more warnings, uncomment the following line
# warnings="$warnings -Wshadow"
diff --git a/BUILD/compile-pentium64-ubsan b/BUILD/compile-pentium64-ubsan
new file mode 100755
index 00000000000..bf56d84283f
--- /dev/null
+++ b/BUILD/compile-pentium64-ubsan
@@ -0,0 +1,29 @@
+#! /bin/sh
+# Copyright (c) 2018, MariaDB Corporation.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA
+
+# Compilation with UBSAN, the Undefined Behavior Sanitizer
+# We have to use -Wno-uninitialized as we get a lot of false
+# positive warnings for this when compiling with -fsanitize=undefined.
+# We also have to compile without Spider as linking with Spider library does
+# not work. (errno: 11, undefined symbol: _ZTI12ha_partition)
+
+path=`dirname $0`
+. "$path/SETUP.sh"
+
+extra_flags="$pentium64_cflags $debug_cflags -fsanitize=undefined -DWITH_UBSAN -Wno-conversion -Wno-uninitialized"
+extra_configs="$pentium_configs $debug_configs -DWITH_UBSAN=ON -DMYSQL_MAINTAINER_MODE=NO --without-spider --without-tokudb"
+
+. "$path/FINISH.sh"
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 8cf62593ae7..f3d4ae49744 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -1,5 +1,5 @@
# Copyright (c) 2006, 2017, Oracle and/or its affiliates.
-# Copyright (c) 2008, 2020, MariaDB Corporation.
+# Copyright (c) 2008, 2021, MariaDB Corporation.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -132,7 +132,7 @@ ENDIF()
# NUMA
SET(WITH_NUMA "AUTO" CACHE STRING "Build with non-uniform memory access, allowing --innodb-numa-interleave. Options are ON|OFF|AUTO. ON = enabled (requires NUMA library), OFF = disabled, AUTO = enabled if NUMA library found.")
-SET(MYSQL_MAINTAINER_MODE "AUTO" CACHE STRING "MySQL maintainer-specific development environment. Options are: ON OFF AUTO.")
+SET(MYSQL_MAINTAINER_MODE "AUTO" CACHE STRING "Enable MariaDB maintainer-specific warnings. One of: NO (warnings are disabled) WARN (warnings are enabled) ERR (warnings are errors) AUTO (warnings are errors in Debug only)")
# Packaging
IF (NOT CPACK_GENERATOR)
@@ -196,21 +196,17 @@ OPTION(WITH_ASAN "Enable address sanitizer" OFF)
IF (WITH_ASAN AND NOT MSVC)
# this flag might be set by default on some OS
MY_CHECK_AND_SET_COMPILER_FLAG("-U_FORTIFY_SOURCE" DEBUG RELWITHDEBINFO)
- # gcc 4.8.1 and new versions of clang
MY_CHECK_AND_SET_COMPILER_FLAG("-fsanitize=address -fPIC"
DEBUG RELWITHDEBINFO)
SET(HAVE_C_FSANITIZE ${have_C__fsanitize_address__fPIC})
SET(HAVE_CXX_FSANITIZE ${have_CXX__fsanitize_address__fPIC})
IF(HAVE_C_FSANITIZE AND HAVE_CXX_FSANITIZE)
+ OPTION(WITH_ASAN_SCOPE "Enable -fsanitize-address-use-after-scope" OFF)
SET(WITH_ASAN_OK 1)
- ELSE()
- # older versions of clang
- MY_CHECK_AND_SET_COMPILER_FLAG("-faddress-sanitizer -fPIC"
- DEBUG RELWITHDEBINFO)
- SET(HAVE_C_FADDRESS ${have_C__faddress_sanitizer__fPIC})
- SET(HAVE_CXX_FADDRESS ${have_CXX__faddress_sanitizer__fPIC})
- IF(HAVE_C_FADDRESS AND HAVE_CXX_FADDRESS)
- SET(WITH_ASAN_OK 1)
+ IF(WITH_ASAN_SCOPE)
+ MY_CHECK_AND_SET_COMPILER_FLAG(
+ "-fsanitize=address -fsanitize-address-use-after-scope"
+ DEBUG RELWITHDEBINFO)
ENDIF()
ENDIF()
@@ -509,8 +505,7 @@ ADD_CUSTOM_TARGET(INFO_BIN ALL
WORKING_DIRECTORY ${CMAKE_BINARY_DIR}
)
-INSTALL_DOCUMENTATION(README.md CREDITS COPYING THIRDPARTY
- EXCEPTIONS-CLIENT COMPONENT Readme)
+INSTALL_DOCUMENTATION(README.md CREDITS COPYING THIRDPARTY COMPONENT Readme)
# MDEV-6526 these files are not installed anymore
#INSTALL_DOCUMENTATION(${CMAKE_BINARY_DIR}/Docs/INFO_SRC
diff --git a/CREDITS b/CREDITS
index 9f03f7fc24e..f5e87e18752 100644
--- a/CREDITS
+++ b/CREDITS
@@ -4,24 +4,19 @@ organization registered in the USA.
The current main sponsors of the MariaDB Foundation are:
Alibaba Cloud https://www.alibabacloud.com/ (2017)
-Booking.com https://www.booking.com (2013)
MariaDB Corporation https://www.mariadb.com (2013)
Microsoft https://microsoft.com/ (2017)
+ServiceNow https://servicenow.com (2019)
Tencent Cloud https://cloud.tencent.com (2017)
Development Bank of Singapore https://dbs.com (2016)
IBM https://www.ibm.com (2017)
-Tencent Games http://game.qq.com/ (2018)
Visma https://visma.com (2015)
-Acronis https://acronis.com (2016)
-Nexedi https://www.nexedi.com (2016)
+Automattic https://automattic.com (2019)
+Galera Cluster https://galeracluster.com (2020)
Percona https://www.percona.com (2018)
-Tencent Game DBA http://tencentdba.com/about (2016)
-Tencent TDSQL http://tdsql.org (2016)
-Verkkokauppa.com https://www.verkkokauppa.com (2015)
-Virtuozzo https://virtuozzo.com (2016)
For a full list of sponsors, see
-https://mariadb.org/about/supporters/
+https://mariadb.org/about/#sponsors
and for individual contributors, see
https://mariadb.org/donate/individual-sponsors/
diff --git a/EXCEPTIONS-CLIENT b/EXCEPTIONS-CLIENT
deleted file mode 100644
index 11ba42c422f..00000000000
--- a/EXCEPTIONS-CLIENT
+++ /dev/null
@@ -1,136 +0,0 @@
- FOSS License Exception
-
- http://www.mysql.com/about/legal/licensing/foss-exception/
- Updated February 23, 2012
-
-What is the FOSS License Exception?
-
- Oracle's Free and Open Source Software ("FOSS") License Exception
- (formerly known as the FLOSS License Exception) allows developers of FOSS
- applications to include Oracle's MySQL Client Libraries (also referred to
- as "MySQL Drivers" or "MySQL Connectors") with their FOSS applications.
- MySQL Client Libraries are typically licensed pursuant to version 2 of the
- General Public License ("GPL"), but this exception permits distribution of
- certain MySQL Client Libraries with a developer's FOSS applications
- licensed under the terms of another FOSS license listed below, even though
- such other FOSS license may be incompatible with the GPL.
-
- The following terms and conditions describe the circumstances under which
- Oracle's FOSS License Exception applies.
-
-Oracle's FOSS License Exception Terms and Conditions
-
- 1. Definitions. "Derivative Work" means a derivative work, as defined
- under applicable copyright law, formed entirely from the Program and
- one or more FOSS Applications.
-
- "FOSS Application" means a free and open source software application
- distributed subject to a license listed in the section below titled
- "FOSS License List."
-
- "FOSS Notice" means a notice placed by Oracle or MySQL in a copy of
- the MySQL Client Libraries stating that such copy of the MySQL Client
- Libraries may be distributed under Oracle's or MySQL's FOSS (or FLOSS)
- License Exception.
-
- "Independent Work" means portions of the Derivative Work that are not
- derived from the Program and can reasonably be considered independent
- and separate works.
-
- "Program" means a copy of Oracle's MySQL Client Libraries that
- contains a FOSS Notice.
-
- 2. A FOSS application developer ("you" or "your") may distribute a
- Derivative Work provided that you and the Derivative Work meet all of
- the following conditions:
-
- a. You obey the GPL in all respects for the Program and all portions
- (including modifications) of the Program included in the
- Derivative Work (provided that this condition does not apply to
- Independent Works);
- b. The Derivative Work does not include any work licensed under the
- GPL other than the Program;
- c. You distribute Independent Works subject to a license listed in
- the section below titled "FOSS License List";
- d. You distribute Independent Works in object code or executable
- form with the complete corresponding machine-readable source code
- on the same medium and under the same FOSS license applying to
- the object code or executable forms;
- e. All works that are aggregated with the Program or the Derivative
- Work on a medium or volume of storage are not derivative works of
- the Program, Derivative Work or FOSS Application, and must
- reasonably be considered independent and separate works.
-
- 3. Oracle reserves all rights not expressly granted in these terms and
- conditions. If all of the above conditions are not met, then this FOSS
- License Exception does not apply to you or your Derivative Work.
-
-FOSS License List
-
-+------------------------------------------------------------------------+
-|License Name |Version(s)/Copyright Date|
-|----------------------------------------------+-------------------------|
-|Academic Free License |2.0 |
-|----------------------------------------------+-------------------------|
-|Apache Software License |1.0/1.1/2.0 |
-|----------------------------------------------+-------------------------|
-|Apple Public Source License |2.0 |
-|----------------------------------------------+-------------------------|
-|Artistic license |From Perl 5.8.0 |
-|----------------------------------------------+-------------------------|
-|BSD license |"July 22 1999" |
-|----------------------------------------------+-------------------------|
-|Common Development and Distribution License |1.0 |
-|(CDDL) | |
-|----------------------------------------------+-------------------------|
-|Common Public License |1.0 |
-|----------------------------------------------+-------------------------|
-|Eclipse Public License |1.0 |
-|----------------------------------------------+-------------------------|
-|European Union Public License (EUPL)¹ |1.1 |
-|----------------------------------------------+-------------------------|
-|GNU Affero General Public License (AGPL) |3.0 |
-|----------------------------------------------+-------------------------|
-|GNU Library or "Lesser" General Public License|2.0/2.1/3.0 |
-|(LGPL) | |
-|----------------------------------------------+-------------------------|
-|GNU General Public License (GPL) |3.0 |
-|----------------------------------------------+-------------------------|
-|IBM Public License |1.0 |
-|----------------------------------------------+-------------------------|
-|Jabber Open Source License |1.0 |
-|----------------------------------------------+-------------------------|
-|MIT License (As listed in file |- |
-|MIT-License.txt) | |
-|----------------------------------------------+-------------------------|
-|Mozilla Public License (MPL) |1.0/1.1 |
-|----------------------------------------------+-------------------------|
-|Open Software License |2.0 |
-|----------------------------------------------+-------------------------|
-|OpenSSL license (with original SSLeay license)|"2003" ("1998") |
-|----------------------------------------------+-------------------------|
-|PHP License |3.0/3.01 |
-|----------------------------------------------+-------------------------|
-|Python license (CNRI Python License) |- |
-|----------------------------------------------+-------------------------|
-|Python Software Foundation License |2.1.1 |
-|----------------------------------------------+-------------------------|
-|Sleepycat License |"1999" |
-|----------------------------------------------+-------------------------|
-|University of Illinois/NCSA Open Source |- |
-|License | |
-|----------------------------------------------+-------------------------|
-|W3C License |"2001" |
-|----------------------------------------------+-------------------------|
-|X11 License |"2001" |
-|----------------------------------------------+-------------------------|
-|Zlib/libpng License |- |
-|----------------------------------------------+-------------------------|
-|Zope Public License |2.0 |
-+------------------------------------------------------------------------+
-
-¹) When an Independent Work is licensed under a "Compatible License"
-pursuant to the EUPL, the Compatible License rather than the EUPL is the
-applicable license for purposes of these FOSS License Exception Terms and
-Conditions.
-
diff --git a/VERSION b/VERSION
index 9d0f9661bdd..5cbf2f4e130 100644
--- a/VERSION
+++ b/VERSION
@@ -1,4 +1,4 @@
MYSQL_VERSION_MAJOR=10
MYSQL_VERSION_MINOR=4
-MYSQL_VERSION_PATCH=18
+MYSQL_VERSION_PATCH=19
SERVER_MATURITY=stable
diff --git a/client/mysql.cc b/client/mysql.cc
index 3cae7241969..b7a2d6c5e72 100644
--- a/client/mysql.cc
+++ b/client/mysql.cc
@@ -4704,7 +4704,11 @@ sql_real_connect(char *host,char *database,char *user,char *password,
return -1; // Retryable
}
- charset_info= get_charset_by_name(mysql.charset->name, MYF(0));
+ if (!(charset_info= get_charset_by_name(mysql.charset->name, MYF(0))))
+ {
+ put_info("Unknown default character set", INFO_ERROR);
+ return 1;
+ }
connected=1;
diff --git a/client/mysqldump.c b/client/mysqldump.c
index d6ff2dd275f..fc5b3bfabaf 100644
--- a/client/mysqldump.c
+++ b/client/mysqldump.c
@@ -42,6 +42,11 @@
/* on merge conflict, bump to a higher version again */
#define DUMP_VERSION "10.19"
+/**
+ First mysql version supporting sequences.
+*/
+#define FIRST_SEQUENCE_VERSION 100300
+
#include <my_global.h>
#include <my_sys.h>
#include <my_user.h>
@@ -92,6 +97,11 @@
/* Max length GTID position that we will output. */
#define MAX_GTID_LENGTH 1024
+/* Dump sequence/tables control */
+#define DUMP_TABLE_ALL -1
+#define DUMP_TABLE_TABLE 0
+#define DUMP_TABLE_SEQUENCE 1
+
static my_bool ignore_table_data(const uchar *hash_key, size_t len);
static void add_load_option(DYNAMIC_STRING *str, const char *option,
const char *option_value);
@@ -1063,6 +1073,20 @@ static int get_options(int *argc, char ***argv)
if ((ho_error= handle_options(argc, argv, my_long_options, get_one_option)))
return(ho_error);
+ /*
+ Dumping under --system=stats with --replace or --insert-ignore is safe and will not
+ result in a race condition. Otherwise only the structure is dumped and the data is
+ ignored by default.
+ */
+ if (!(opt_system & OPT_SYSTEM_STATS) && !(opt_ignore || opt_replace_into))
+ {
+ if (my_hash_insert(&ignore_data,
+ (uchar*) my_strdup("mysql.innodb_index_stats", MYF(MY_WME))) ||
+ my_hash_insert(&ignore_data,
+ (uchar*) my_strdup("mysql.innodb_table_stats", MYF(MY_WME))))
+ return(EX_EOM);
+ }
+
if (opt_system & OPT_SYSTEM_ALL)
opt_system|= ~0;
@@ -3867,14 +3891,6 @@ static void dump_table(const char *table, const char *db, const uchar *hash_key,
DBUG_ENTER("dump_table");
/*
- Check does table has a sequence structure and if has apply different sql queries
- */
- if (check_if_ignore_table(table, table_type) & IGNORE_SEQUENCE_TABLE)
- {
- get_sequence_structure(table, db);
- DBUG_VOID_RETURN;
- }
- /*
Make sure you get the create table info before the following check for
--no-data flag below. Otherwise, the create table info won't be printed.
*/
@@ -4358,18 +4374,36 @@ err:
} /* dump_table */
-static char *getTableName(int reset)
+static char *getTableName(int reset, int want_sequences)
{
MYSQL_ROW row;
if (!get_table_name_result)
{
- if (!(get_table_name_result= mysql_list_tables(mysql,NullS)))
- return(NULL);
+ if (mysql_get_server_version(mysql) >= FIRST_SEQUENCE_VERSION)
+ {
+ const char *query= "SHOW FULL TABLES";
+ if (mysql_query_with_error_report(mysql, 0, query))
+ return (NULL);
+
+ if (!(get_table_name_result= mysql_store_result(mysql)))
+ return (NULL);
+ }
+ else
+ {
+ if (!(get_table_name_result= mysql_list_tables(mysql,NullS)))
+ return(NULL);
+ }
}
if ((row= mysql_fetch_row(get_table_name_result)))
- return((char*) row[0]);
+ {
+ if (want_sequences != DUMP_TABLE_ALL)
+ while (row && MY_TEST(strcmp(row[1], "SEQUENCE")) == want_sequences)
+ row= mysql_fetch_row(get_table_name_result);
+ if (row)
+ return((char*) row[0]);
+ }
if (reset)
mysql_data_seek(get_table_name_result,0); /* We want to read again */
else
@@ -4762,7 +4796,7 @@ static int dump_all_servers()
static int dump_all_stats()
{
- my_bool prev_no_create_info;
+ my_bool prev_no_create_info, prev_opt_replace_into;
if (mysql_select_db(mysql, "mysql"))
{
@@ -4770,6 +4804,8 @@ static int dump_all_stats()
return 1; /* If --force */
}
fprintf(md_result_file,"\nUSE mysql;\n");
+ prev_opt_replace_into= opt_replace_into;
+ opt_replace_into|= !opt_ignore;
prev_no_create_info= opt_no_create_info;
opt_no_create_info= 1; /* don't overwrite recreate tables */
/* EITS added in 10.0.1 */
@@ -4788,6 +4824,7 @@ static int dump_all_stats()
dump_table("innodb_table_stats", "mysql", NULL, 0);
}
opt_no_create_info= prev_no_create_info;
+ opt_replace_into= prev_opt_replace_into;
return 0;
}
@@ -4798,12 +4835,14 @@ static int dump_all_stats()
static int dump_all_timezones()
{
- my_bool opt_prev_no_create_info;
+ my_bool opt_prev_no_create_info, opt_prev_replace_into;
if (mysql_select_db(mysql, "mysql"))
{
DB_error(mysql, "when selecting the database");
return 1; /* If --force */
}
+ opt_prev_replace_into= opt_replace_into;
+ opt_replace_into|= !opt_ignore;
opt_prev_no_create_info= opt_no_create_info;
opt_no_create_info= 1;
fprintf(md_result_file,"\nUSE mysql;\n");
@@ -4813,6 +4852,7 @@ static int dump_all_timezones()
dump_table("time_zone_transition", "mysql", NULL, 0);
dump_table("time_zone_transition_type", "mysql", NULL, 0);
opt_no_create_info= opt_prev_no_create_info;
+ opt_replace_into= opt_prev_replace_into;
return 0;
}
@@ -5302,7 +5342,7 @@ static int dump_all_tables_in_db(char *database)
{
DYNAMIC_STRING query;
init_dynamic_string_checked(&query, "LOCK TABLES ", 256, 1024);
- for (numrows= 0 ; (table= getTableName(1)) ; )
+ for (numrows= 0 ; (table= getTableName(1, DUMP_TABLE_ALL)) ; )
{
char *end= strmov(afterdot, table);
if (include_table((uchar*) hash_key,end - hash_key))
@@ -5336,7 +5376,19 @@ static int dump_all_tables_in_db(char *database)
DBUG_RETURN(1);
}
}
- while ((table= getTableName(0)))
+
+ if (mysql_get_server_version(mysql) >= FIRST_SEQUENCE_VERSION &&
+ !opt_no_create_info)
+ {
+ // First process sequences
+ while ((table= getTableName(1, DUMP_TABLE_SEQUENCE)))
+ {
+ char *end= strmov(afterdot, table);
+ if (include_table((uchar*) hash_key, end - hash_key))
+ get_sequence_structure(table, database);
+ }
+ }
+ while ((table= getTableName(0, DUMP_TABLE_TABLE)))
{
char *end= strmov(afterdot, table);
if (include_table((uchar*) hash_key, end - hash_key))
@@ -5485,7 +5537,7 @@ static my_bool dump_all_views_in_db(char *database)
{
DYNAMIC_STRING query;
init_dynamic_string_checked(&query, "LOCK TABLES ", 256, 1024);
- for (numrows= 0 ; (table= getTableName(1)); )
+ for (numrows= 0 ; (table= getTableName(1, DUMP_TABLE_TABLE)); )
{
char *end= strmov(afterdot, table);
if (include_table((uchar*) hash_key,end - hash_key))
@@ -5508,7 +5560,7 @@ static my_bool dump_all_views_in_db(char *database)
else
verbose_msg("-- dump_all_views_in_db : logs flushed successfully!\n");
}
- while ((table= getTableName(0)))
+ while ((table= getTableName(0, DUMP_TABLE_TABLE)))
{
char *end= strmov(afterdot, table);
if (include_table((uchar*) hash_key, end - hash_key))
@@ -5638,7 +5690,7 @@ static int get_sys_var_lower_case_table_names()
static int dump_selected_tables(char *db, char **table_names, int tables)
{
- char table_buff[NAME_LEN*2+3];
+ char table_buff[NAME_LEN*2+3], table_type[NAME_LEN];
DYNAMIC_STRING lock_tables_query;
char **dump_tables, **pos, **end;
int lower_case_table_names;
@@ -5735,9 +5787,22 @@ static int dump_selected_tables(char *db, char **table_names, int tables)
DBUG_RETURN(1);
}
}
+
+ if (mysql_get_server_version(mysql) >= FIRST_SEQUENCE_VERSION)
+ {
+ /* Dump Sequence first */
+ for (pos= dump_tables; pos < end; pos++)
+ {
+ DBUG_PRINT("info",("Dumping sequence(?) %s", *pos));
+ if (check_if_ignore_table(*pos, table_type) & IGNORE_SEQUENCE_TABLE)
+ get_sequence_structure(*pos, db);
+ }
+ }
/* Dump each selected table */
for (pos= dump_tables; pos < end; pos++)
{
+ if (check_if_ignore_table(*pos, table_type) & IGNORE_SEQUENCE_TABLE)
+ continue;
DBUG_PRINT("info",("Dumping table %s", *pos));
dump_table(*pos, db, NULL, 0);
if (opt_dump_triggers &&
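
The mysqldump hunks above make getTableName() classify objects by the second column of
SHOW FULL TABLES so that sequences can be emitted before the base tables that may depend
on them. A minimal, self-contained sketch of that client-side filtering against the
MariaDB C client API; the connection handle 'con' and the function name 'list_objects'
are illustrative assumptions, not code taken from mysqldump itself:

#include <stdio.h>
#include <string.h>
#include <mysql.h>

/* Print sequences (want_sequences = 1) or base tables (want_sequences = 0)
   of the current database, using the Table_type column of SHOW FULL TABLES.
   'con' is assumed to be an already-connected MYSQL handle. */
static void list_objects(MYSQL *con, int want_sequences)
{
  if (mysql_query(con, "SHOW FULL TABLES"))
    return;                                   /* error handling elided */
  MYSQL_RES *res= mysql_store_result(con);
  if (!res)
    return;
  MYSQL_ROW row;
  while ((row= mysql_fetch_row(res)))
  {
    int is_sequence= row[1] && strcmp(row[1], "SEQUENCE") == 0;
    if (is_sequence == want_sequences)
      puts(row[0]);                           /* object name */
  }
  mysql_free_result(res);
}
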
diff --git a/client/mysqlslap.c b/client/mysqlslap.c
index ed5e20ca18c..c44bf8446f9 100644
--- a/client/mysqlslap.c
+++ b/client/mysqlslap.c
@@ -1177,9 +1177,6 @@ get_options(int *argc,char ***argv)
if (debug_check_flag)
my_end_arg= MY_CHECK_ERROR;
- if (!user)
- user= (char *)"root";
-
/*
If something is created and --no-drop is not specified, we drop the
schema.
diff --git a/client/mysqltest.cc b/client/mysqltest.cc
index 73a6f03d889..5dfcce53e83 100644
--- a/client/mysqltest.cc
+++ b/client/mysqltest.cc
@@ -10905,7 +10905,7 @@ int get_next_bit(REP_SET *set,uint lastpos)
start=set->bits+ ((lastpos+1) / WORD_BIT);
end=set->bits + set->size_of_bits;
- bits=start[0] & ~((1 << ((lastpos+1) % WORD_BIT)) -1);
+ bits=start[0] & ~((1U << ((lastpos+1) % WORD_BIT)) -1);
while (! bits && ++start < end)
bits=start[0];
diff --git a/cmake/configure.pl b/cmake/configure.pl
index c296c5ba4b8..4085110b6fa 100644
--- a/cmake/configure.pl
+++ b/cmake/configure.pl
@@ -93,6 +93,11 @@ foreach my $option (@ARGV)
{
$option = substr($option, 2);
}
+ elsif (substr ($option, 0, 2) eq "-D")
+ {
+ # Must be cmake config option
+ $option = substr($option, 1);
+ }
else
{
# This must be environment variable
@@ -119,6 +124,11 @@ foreach my $option (@ARGV)
$just_print=1;
next;
}
+ if ($option =~ /D.*=/)
+ {
+ $cmakeargs = $cmakeargs." -".$option;
+ next;
+ }
if($option =~ /with-plugins=/)
{
my @plugins= split(/,/, substr($option,13));
diff --git a/cmake/install_macros.cmake b/cmake/install_macros.cmake
index 2318e9d5dea..8c24d2484ea 100644
--- a/cmake/install_macros.cmake
+++ b/cmake/install_macros.cmake
@@ -181,7 +181,7 @@ IF(WIN32)
MARK_AS_ADVANCED(SIGNCODE)
IF(SIGNCODE)
SET(SIGNTOOL_PARAMETERS
- /a /t http://timestamp.verisign.com/scripts/timstamp.dll
+ /a /t http://timestamp.globalsign.com/?signature=sha2
CACHE STRING "parameters for signtool (list)")
FIND_PROGRAM(SIGNTOOL_EXECUTABLE signtool
PATHS "$ENV{ProgramFiles}/Microsoft SDKs/Windows/v7.0A/bin"
diff --git a/cmake/maintainer.cmake b/cmake/maintainer.cmake
index ab70ef3aea1..8dbe7a6df47 100644
--- a/cmake/maintainer.cmake
+++ b/cmake/maintainer.cmake
@@ -1,5 +1,5 @@
# Copyright (c) 2010, 2014, Oracle and/or its affiliates. All rights reserved.
-# Copyright (c) 2020, MariaDB
+# Copyright (c) 2011, 2021, MariaDB
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -14,7 +14,7 @@
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1335 USA
-IF(MSVC)
+IF(MYSQL_MAINTAINER_MODE STREQUAL "NO")
RETURN()
ENDIF()
@@ -46,7 +46,7 @@ IF(CMAKE_COMPILER_IS_GNUCC AND CMAKE_C_COMPILER_VERSION VERSION_LESS "6.0.0")
SET(MY_ERROR_FLAGS ${MY_ERROR_FLAGS} -Wno-error=maybe-uninitialized)
ENDIF()
-IF(MYSQL_MAINTAINER_MODE MATCHES "OFF")
+IF(MYSQL_MAINTAINER_MODE MATCHES "OFF|WARN")
RETURN()
ELSEIF(MYSQL_MAINTAINER_MODE MATCHES "AUTO")
SET(WHERE DEBUG)
diff --git a/cmake/make_dist.cmake.in b/cmake/make_dist.cmake.in
index f471c6e53d0..d662dd76730 100644
--- a/cmake/make_dist.cmake.in
+++ b/cmake/make_dist.cmake.in
@@ -43,6 +43,13 @@ IF(GIT_EXECUTABLE)
MESSAGE(STATUS "Running git checkout-index")
EXECUTE_PROCESS(
COMMAND "${GIT_EXECUTABLE}" checkout-index --all --prefix=${PACKAGE_DIR}/
+ WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
+ RESULT_VARIABLE RESULT
+ )
+ IF(NOT RESULT EQUAL 0)
+ SET(GIT_EXECUTABLE)
+ ENDIF()
+ EXECUTE_PROCESS(
COMMAND "${GIT_EXECUTABLE}" submodule foreach "${GIT_EXECUTABLE} checkout-index --all --prefix=${PACKAGE_DIR}/$path/"
WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
RESULT_VARIABLE RESULT
diff --git a/cmake/os/WindowsCache.cmake b/cmake/os/WindowsCache.cmake
index 149fdad231f..bbf68add7d7 100644
--- a/cmake/os/WindowsCache.cmake
+++ b/cmake/os/WindowsCache.cmake
@@ -89,6 +89,7 @@ SET(HAVE_LRAND48 CACHE INTERNAL "")
SET(HAVE_LSTAT CACHE INTERNAL "")
SET(HAVE_MADVISE CACHE INTERNAL "")
SET(HAVE_MALLINFO CACHE INTERNAL "")
+SET(HAVE_MALLINFO2 CACHE INTERNAL "")
SET(HAVE_MALLOC_H 1 CACHE INTERNAL "")
SET(HAVE_MEMALIGN CACHE INTERNAL "")
SET(HAVE_MEMCPY 1 CACHE INTERNAL "")
diff --git a/config.h.cmake b/config.h.cmake
index 652ea683ce4..a959d79c7ac 100644
--- a/config.h.cmake
+++ b/config.h.cmake
@@ -173,6 +173,7 @@
#cmakedefine HAVE_DECL_MADVISE 1
#cmakedefine HAVE_DECL_MHA_MAPSIZE_VA 1
#cmakedefine HAVE_MALLINFO 1
+#cmakedefine HAVE_MALLINFO2 1
#cmakedefine HAVE_MEMCPY 1
#cmakedefine HAVE_MEMMOVE 1
#cmakedefine HAVE_MKSTEMP 1
diff --git a/configure.cmake b/configure.cmake
index 1c8265e29f1..a88868eb83a 100644
--- a/configure.cmake
+++ b/configure.cmake
@@ -134,6 +134,7 @@ IF(UNIX)
IF(NOT LIBRT)
MY_SEARCH_LIBS(clock_gettime rt LIBRT)
ENDIF()
+ set(THREADS_PREFER_PTHREAD_FLAG ON)
FIND_PACKAGE(Threads)
SET(CMAKE_REQUIRED_LIBRARIES
@@ -365,6 +366,7 @@ CHECK_FUNCTION_EXISTS (localtime_r HAVE_LOCALTIME_R)
CHECK_FUNCTION_EXISTS (lstat HAVE_LSTAT)
CHECK_FUNCTION_EXISTS (madvise HAVE_MADVISE)
CHECK_FUNCTION_EXISTS (mallinfo HAVE_MALLINFO)
+CHECK_FUNCTION_EXISTS (mallinfo2 HAVE_MALLINFO2)
CHECK_FUNCTION_EXISTS (memcpy HAVE_MEMCPY)
CHECK_FUNCTION_EXISTS (memmove HAVE_MEMMOVE)
CHECK_FUNCTION_EXISTS (mkstemp HAVE_MKSTEMP)
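
The HAVE_MALLINFO2 probes added above (config.h.cmake, configure.cmake, WindowsCache.cmake)
let code prefer glibc's mallinfo2(), available since glibc 2.33, whose counters are size_t
rather than the int fields of the legacy mallinfo(). A hedged sketch of the kind of fallback
these defines enable; it is not lifted from the server source:

#include <malloc.h>

/* Report the heap bytes currently allocated, preferring mallinfo2() when
   the build system detected it (HAVE_MALLINFO2), else falling back. */
static unsigned long long allocated_bytes(void)
{
#if defined(HAVE_MALLINFO2)
  struct mallinfo2 mi= mallinfo2();      /* size_t fields, no 2 GiB wrap-around */
  return (unsigned long long) mi.uordblks;
#elif defined(HAVE_MALLINFO)
  struct mallinfo mi= mallinfo();        /* int fields, may overflow on big heaps */
  return (unsigned long long) (unsigned int) mi.uordblks;
#else
  return 0;                              /* no allocator statistics available */
#endif
}
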
diff --git a/debian/autobake-deb.sh b/debian/autobake-deb.sh
index d1c0d779269..a8b1010643b 100755
--- a/debian/autobake-deb.sh
+++ b/debian/autobake-deb.sh
@@ -96,6 +96,12 @@ then
sed '/Package: mariadb-plugin-rocksdb/,/^$/d' -i debian/control
fi
+## Skip TokuDB if arch is not amd64
+if [[ ! $(dpkg-architecture -q DEB_BUILD_ARCH) =~ amd64 ]]
+then
+ sed '/Package: mariadb-plugin-tokudb/,/^$/d' -i debian/control
+fi
+
# Always remove aws plugin, see -DNOT_FOR_DISTRIBUTION in CMakeLists.txt
sed '/Package: mariadb-plugin-aws-key-management-10.2/,/^$/d' -i debian/control
@@ -105,6 +111,15 @@ then
sed '/Package: mariadb-plugin-cassandra/,/^$/d' -i debian/control
fi
+# From Debian Stretch/Ubuntu Bionic onwards dh-systemd is just an empty
+# transitional metapackage and the functionality was merged into debhelper.
+# In Ubuntu Hirsute it was completely removed, so it can't be referenced anymore.
+# Keep using it only on Debian Jessie and Ubuntu Xenial.
+if apt-cache madison dh-systemd | grep 'dh-systemd' >/dev/null 2>&1
+then
+ sed 's/debhelper (>= 9.20160709~),/debhelper (>= 9), dh-systemd,/' -i debian/control
+fi
+
# Mroonga, TokuDB never built on Travis CI anyway, see build flags above
if [[ $TRAVIS ]]
then
diff --git a/debian/control b/debian/control
index ee49410c1bd..fbab7ff862e 100644
--- a/debian/control
+++ b/debian/control
@@ -5,7 +5,7 @@ Maintainer: MariaDB Developers <maria-developers@lists.launchpad.net>
Build-Depends: bison,
chrpath,
cmake (>= 2.7),
- debhelper (>= 9),
+ debhelper (>= 9.20160709~),
dh-apparmor,
dh-exec,
dh-systemd,
diff --git a/debian/libmariadb3.install b/debian/libmariadb3.install
index 8636166a493..82c4b2a3f1d 100644
--- a/debian/libmariadb3.install
+++ b/debian/libmariadb3.install
@@ -3,3 +3,4 @@ usr/lib/mysql/plugin/client_ed25519.so
usr/lib/mysql/plugin/dialog.so
usr/lib/mysql/plugin/mysql_clear_password.so
usr/lib/mysql/plugin/sha256_password.so
+usr/lib/mysql/plugin/caching_sha2_password.so
diff --git a/debian/mariadb-server-10.4.postinst b/debian/mariadb-server-10.4.postinst
index 3db4d50ea08..4e759a1d54b 100644
--- a/debian/mariadb-server-10.4.postinst
+++ b/debian/mariadb-server-10.4.postinst
@@ -165,8 +165,8 @@ EOF
;;
triggered)
- if [ -x "$(command -v systemctl)" ]; then
- systemctl daemon-reload
+ if [ -d /run/systemd/system ]; then
+ systemctl --system daemon-reload
fi
invoke restart
;;
diff --git a/extra/mariabackup/backup_copy.cc b/extra/mariabackup/backup_copy.cc
index 80191418388..f52d288f42d 100644
--- a/extra/mariabackup/backup_copy.cc
+++ b/extra/mariabackup/backup_copy.cc
@@ -1062,6 +1062,7 @@ copy_file(ds_ctxt_t *datasink,
ds_file_t *dstfile = NULL;
datafile_cur_t cursor;
xb_fil_cur_result_t res;
+ DBUG_ASSERT(datasink->datasink->remove);
const char *dst_path =
(xtrabackup_copy_back || xtrabackup_move_back)?
dst_file_path : trim_dotslash(dst_file_path);
@@ -1087,6 +1088,7 @@ copy_file(ds_ctxt_t *datasink,
if (ds_write(dstfile, cursor.buf, cursor.buf_read)) {
goto error;
}
+ DBUG_EXECUTE_IF("copy_file_error", errno=ENOSPC;goto error;);
}
if (res == XB_FIL_CUR_ERROR) {
@@ -1108,6 +1110,7 @@ copy_file(ds_ctxt_t *datasink,
error:
datafile_close(&cursor);
if (dstfile != NULL) {
+ datasink->datasink->remove(dstfile->path);
ds_close(dstfile);
}
@@ -1152,17 +1155,18 @@ move_file(ds_ctxt_t *datasink,
if (my_rename(src_file_path, dst_file_path_abs, MYF(0)) != 0) {
if (my_errno == EXDEV) {
- bool ret;
- ret = copy_file(datasink, src_file_path,
- dst_file_path, thread_n);
+ /* Fallback to copy/unlink */
+ if(!copy_file(datasink, src_file_path,
+ dst_file_path, thread_n))
+ return false;
msg(thread_n,"Removing %s", src_file_path);
if (unlink(src_file_path) != 0) {
my_strerror(errbuf, sizeof(errbuf), errno);
- msg("Error: unlink %s failed: %s",
+ msg("Warning: unlink %s failed: %s",
src_file_path,
errbuf);
}
- return(ret);
+ return true;
}
my_strerror(errbuf, sizeof(errbuf), my_errno);
msg("Can not move file %s to %s: %s",
diff --git a/extra/mariabackup/backup_mysql.cc b/extra/mariabackup/backup_mysql.cc
index 4c0ef77760c..162980acb21 100644
--- a/extra/mariabackup/backup_mysql.cc
+++ b/extra/mariabackup/backup_mysql.cc
@@ -44,6 +44,7 @@ Street, Fifth Floor, Boston, MA 02110-1335 USA
#include <mysql.h>
#include <mysqld.h>
#include <my_sys.h>
+#include <stdlib.h>
#include <string.h>
#include <limits>
#include "common.h"
@@ -108,6 +109,13 @@ xb_mysql_connect()
return(NULL);
}
+#if !defined(DONT_USE_MYSQL_PWD)
+ if (!opt_password)
+ {
+ opt_password=getenv("MYSQL_PWD");
+ }
+#endif
+
if (!opt_secure_auth) {
mysql_options(connection, MYSQL_SECURE_AUTH,
(char *) &opt_secure_auth);
diff --git a/extra/mariabackup/datasink.h b/extra/mariabackup/datasink.h
index 201bbfd3267..5c82556b9ba 100644
--- a/extra/mariabackup/datasink.h
+++ b/extra/mariabackup/datasink.h
@@ -50,9 +50,15 @@ struct datasink_struct {
ds_file_t *(*open)(ds_ctxt_t *ctxt, const char *path, MY_STAT *stat);
int (*write)(ds_file_t *file, const unsigned char *buf, size_t len);
int (*close)(ds_file_t *file);
+ int (*remove)(const char *path);
void (*deinit)(ds_ctxt_t *ctxt);
};
+
+static inline int dummy_remove(const char *) {
+ return 0;
+}
+
/* Supported datasink types */
typedef enum {
DS_TYPE_STDOUT,
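
datasink_struct above gains a remove() member so that a failed copy can delete its
partially written destination (see the new call in backup_copy.cc), with dummy_remove()
serving sinks that have nothing to unlink. A small self-contained illustration of the
same pattern using hypothetical mini_sink types rather than the real mariabackup structs:

#include <stdio.h>

typedef struct {
  FILE *(*open_file)(const char *path);
  int   (*write_bytes)(FILE *f, const void *buf, size_t len);
  int   (*close_file)(FILE *f);
  int   (*remove_file)(const char *path);     /* the new cleanup hook */
} mini_sink;

static FILE *local_open(const char *path)                  { return fopen(path, "wb"); }
static int   local_write(FILE *f, const void *b, size_t n) { return fwrite(b, 1, n, f) == n ? 0 : -1; }
static int   local_close(FILE *f)                          { return fclose(f); }

/* remove() from <stdio.h> already matches the remove_file signature. */
static const mini_sink local_sink= { local_open, local_write, local_close, remove };

/* Copy 'len' bytes into 'dst'; on a write failure, delete the truncated file. */
static int copy_with_cleanup(const mini_sink *sink, const char *dst,
                             const void *buf, size_t len)
{
  FILE *f= sink->open_file(dst);
  if (!f)
    return -1;
  if (sink->write_bytes(f, buf, len))
  {
    sink->close_file(f);
    sink->remove_file(dst);                   /* don't leave partial output behind */
    return -1;
  }
  return sink->close_file(f);
}
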
diff --git a/extra/mariabackup/ds_archive.cc b/extra/mariabackup/ds_archive.cc
index c8fcfa1f5f5..3a5081119b3 100644
--- a/extra/mariabackup/ds_archive.cc
+++ b/extra/mariabackup/ds_archive.cc
@@ -57,6 +57,7 @@ datasink_t datasink_archive = {
&archive_open,
&archive_write,
&archive_close,
+ &dummy_remove,
&archive_deinit
};
diff --git a/extra/mariabackup/ds_buffer.cc b/extra/mariabackup/ds_buffer.cc
index 720a329c238..6e6ec6fcfbc 100644
--- a/extra/mariabackup/ds_buffer.cc
+++ b/extra/mariabackup/ds_buffer.cc
@@ -54,6 +54,7 @@ datasink_t datasink_buffer = {
&buffer_open,
&buffer_write,
&buffer_close,
+ &dummy_remove,
&buffer_deinit
};
diff --git a/extra/mariabackup/ds_compress.cc b/extra/mariabackup/ds_compress.cc
index 487718e2ac0..3c28e55e1fe 100644
--- a/extra/mariabackup/ds_compress.cc
+++ b/extra/mariabackup/ds_compress.cc
@@ -75,6 +75,7 @@ datasink_t datasink_compress = {
&compress_open,
&compress_write,
&compress_close,
+ &dummy_remove,
&compress_deinit
};
diff --git a/extra/mariabackup/ds_local.cc b/extra/mariabackup/ds_local.cc
index fb2ea0a1629..41a00dd9c39 100644
--- a/extra/mariabackup/ds_local.cc
+++ b/extra/mariabackup/ds_local.cc
@@ -43,12 +43,18 @@ static int local_write(ds_file_t *file, const uchar *buf, size_t len);
static int local_close(ds_file_t *file);
static void local_deinit(ds_ctxt_t *ctxt);
+static int local_remove(const char *path)
+{
+ return unlink(path);
+}
+
extern "C" {
datasink_t datasink_local = {
&local_init,
&local_open,
&local_write,
&local_close,
+ &local_remove,
&local_deinit
};
}
diff --git a/extra/mariabackup/ds_stdout.cc b/extra/mariabackup/ds_stdout.cc
index 85dbb83865b..3b3091bd426 100644
--- a/extra/mariabackup/ds_stdout.cc
+++ b/extra/mariabackup/ds_stdout.cc
@@ -40,6 +40,7 @@ datasink_t datasink_stdout = {
&stdout_open,
&stdout_write,
&stdout_close,
+ &dummy_remove,
&stdout_deinit
};
diff --git a/extra/mariabackup/ds_tmpfile.cc b/extra/mariabackup/ds_tmpfile.cc
index 22dff165aa0..d5316803926 100644
--- a/extra/mariabackup/ds_tmpfile.cc
+++ b/extra/mariabackup/ds_tmpfile.cc
@@ -51,6 +51,7 @@ datasink_t datasink_tmpfile = {
&tmpfile_open,
&tmpfile_write,
&tmpfile_close,
+ &dummy_remove,
&tmpfile_deinit
};
diff --git a/extra/mariabackup/ds_xbstream.cc b/extra/mariabackup/ds_xbstream.cc
index 5a753b08474..daf1cc73038 100644
--- a/extra/mariabackup/ds_xbstream.cc
+++ b/extra/mariabackup/ds_xbstream.cc
@@ -50,6 +50,7 @@ datasink_t datasink_xbstream = {
&xbstream_open,
&xbstream_write,
&xbstream_close,
+ &dummy_remove,
&xbstream_deinit
};
diff --git a/extra/mariabackup/innobackupex.cc b/extra/mariabackup/innobackupex.cc
index bc944c18659..1341d2c46de 100644
--- a/extra/mariabackup/innobackupex.cc
+++ b/extra/mariabackup/innobackupex.cc
@@ -208,7 +208,8 @@ enum innobackupex_options
OPT_STREAM,
OPT_TABLES_FILE,
OPT_THROTTLE,
- OPT_USE_MEMORY
+ OPT_USE_MEMORY,
+ OPT_INNODB_FORCE_RECOVERY,
};
ibx_mode_t ibx_mode = IBX_MODE_BACKUP;
@@ -626,6 +627,16 @@ static struct my_option ibx_long_options[] =
0, GET_LL, REQUIRED_ARG, 100*1024*1024L, 1024*1024L, LONGLONG_MAX, 0,
1024*1024L, 0},
+ {"innodb-force-recovery", OPT_INNODB_FORCE_RECOVERY,
+ "This option starts up the embedded InnoDB instance in crash "
+ "recovery mode to ignore page corruption; should be used "
+ "with the \"--apply-log\" option, in emergencies only. The "
+ "default value is 0. Refer to \"innodb_force_recovery\" server "
+ "system variable documentation for more details.",
+ (uchar*)&xtrabackup_innodb_force_recovery,
+ (uchar*)&xtrabackup_innodb_force_recovery,
+ 0, GET_ULONG, OPT_ARG, 0, 0, SRV_FORCE_IGNORE_CORRUPT, 0, 0, 0},
+
{ 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
};
@@ -671,6 +682,7 @@ innobackupex [--compress] [--compress-threads=NUMBER-OF-THREADS] [--compress-chu
innobackupex --apply-log [--use-memory=B]\n\
[--defaults-file=MY.CNF]\n\
[--export] [--ibbackup=IBBACKUP-BINARY]\n\
+ [--innodb-force-recovery=1]\n\
BACKUP-DIR\n\
\n\
innobackupex --copy-back [--defaults-file=MY.CNF] [--defaults-group=GROUP-NAME] BACKUP-DIR\n\
diff --git a/extra/mariabackup/xtrabackup.cc b/extra/mariabackup/xtrabackup.cc
index 45d0d0e1111..de4462bc01f 100644
--- a/extra/mariabackup/xtrabackup.cc
+++ b/extra/mariabackup/xtrabackup.cc
@@ -4,7 +4,7 @@ MariaBackup: hot backup tool for InnoDB
Originally Created 3/3/2009 Yasufumi Kinoshita
Written by Alexey Kopytov, Aleksandr Kuzminsky, Stewart Smith, Vadim Tkachenko,
Yasufumi Kinoshita, Ignacio Nin and Baron Schwartz.
-(c) 2017, 2020, MariaDB Corporation.
+(c) 2017, 2021, MariaDB Corporation.
Portions written by Marko Mäkelä.
This program is free software; you can redistribute it and/or modify
@@ -61,6 +61,10 @@ Street, Fifth Floor, Boston, MA 02110-1335 USA
#include <sys/resource.h>
#endif
+#ifdef __APPLE__
+# include "libproc.h"
+#endif
+
#include <btr0sea.h>
#include <dict0priv.h>
@@ -270,6 +274,12 @@ static char *xtrabackup_debug_sync = NULL;
my_bool xtrabackup_incremental_force_scan = FALSE;
+/*
+ * Ignore corrupt pages (disabled by default; used
+ * by "innobackupex" as a command line argument).
+ */
+ulong xtrabackup_innodb_force_recovery = 0;
+
/* The flushed lsn which is read from data files */
lsn_t flushed_lsn= 0;
@@ -1046,7 +1056,8 @@ enum options_xtrabackup
OPT_BACKUP_ROCKSDB,
OPT_XTRA_CHECK_PRIVILEGES,
OPT_XTRA_MYSQLD_ARGS,
- OPT_XB_IGNORE_INNODB_PAGE_CORRUPTION
+ OPT_XB_IGNORE_INNODB_PAGE_CORRUPTION,
+ OPT_INNODB_FORCE_RECOVERY
};
struct my_option xb_client_options[]= {
@@ -1673,6 +1684,13 @@ struct my_option xb_server_options[] =
&opt_check_privileges, &opt_check_privileges,
0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0 },
+ {"innodb_force_recovery", OPT_INNODB_FORCE_RECOVERY,
+ "(for --prepare): Crash recovery mode (ignores "
+ "page corruption; for emergencies only).",
+ (G_PTR*)&srv_force_recovery,
+ (G_PTR*)&srv_force_recovery,
+ 0, GET_ULONG, OPT_ARG, 0, 0, SRV_FORCE_IGNORE_CORRUPT, 0, 0, 0},
+
{"mysqld-args", OPT_XTRA_MYSQLD_ARGS,
"All arguments that follow this argument are considered as server "
"options, and if some of them are not supported by mariabackup, they "
@@ -1809,31 +1827,33 @@ static int prepare_export()
// Process defaults-file , it can have some --lc-language stuff,
// which is* unfortunately* still necessary to get mysqld up
- if (strncmp(orig_argv1,"--defaults-file=",16) == 0)
+ if (strncmp(orig_argv1,"--defaults-file=", 16) == 0)
{
snprintf(cmdline, sizeof cmdline,
- IF_WIN("\"","") "\"%s\" --mysqld \"%s\" "
+ IF_WIN("\"","") "\"%s\" --mysqld \"%s\""
" --defaults-extra-file=./backup-my.cnf --defaults-group-suffix=%s --datadir=."
" --innodb --innodb-fast-shutdown=0 --loose-partition"
" --innodb_purge_rseg_truncate_frequency=1 --innodb-buffer-pool-size=%llu"
- " --console --skip-log-error --skip-log-bin --bootstrap < "
+ " --console --skip-log-error --skip-log-bin --bootstrap %s< "
BOOTSTRAP_FILENAME IF_WIN("\"",""),
- mariabackup_exe,
+ mariabackup_exe,
orig_argv1, (my_defaults_group_suffix?my_defaults_group_suffix:""),
- xtrabackup_use_memory);
+ xtrabackup_use_memory,
+ (srv_force_recovery ? "--innodb-force-recovery=1 " : ""));
}
else
{
- sprintf(cmdline,
- IF_WIN("\"","") "\"%s\" --mysqld"
+ snprintf(cmdline, sizeof cmdline,
+ IF_WIN("\"","") "\"%s\" --mysqld"
" --defaults-file=./backup-my.cnf --defaults-group-suffix=%s --datadir=."
" --innodb --innodb-fast-shutdown=0 --loose-partition"
" --innodb_purge_rseg_truncate_frequency=1 --innodb-buffer-pool-size=%llu"
- " --console --log-error= --skip-log-bin --bootstrap < "
+ " --console --log-error= --skip-log-bin --bootstrap %s< "
BOOTSTRAP_FILENAME IF_WIN("\"",""),
mariabackup_exe,
(my_defaults_group_suffix?my_defaults_group_suffix:""),
- xtrabackup_use_memory);
+ xtrabackup_use_memory,
+ (srv_force_recovery ? "--innodb-force-recovery=1 " : ""));
}
msg("Prepare export : executing %s\n", cmdline);
@@ -1981,6 +2001,13 @@ xb_get_one_option(int optid,
ADD_PRINT_PARAM_OPT(innobase_buffer_pool_filename);
break;
+ case OPT_INNODB_FORCE_RECOVERY:
+
+ if (srv_force_recovery) {
+ ADD_PRINT_PARAM_OPT(srv_force_recovery);
+ }
+ break;
+
case OPT_XTRA_TARGET_DIR:
strmake(xtrabackup_real_target_dir,argument, sizeof(xtrabackup_real_target_dir)-1);
xtrabackup_target_dir= xtrabackup_real_target_dir;
@@ -2230,6 +2257,29 @@ static bool innodb_init_param()
srv_undo_dir = (char*) ".";
}
+ compile_time_assert(SRV_FORCE_IGNORE_CORRUPT == 1);
+
+ /*
+ * This option can be read both from the command line and the
+ * defaults file. The assignment should account for both cases,
+ * and for "--innobackupex". Since the command line argument is
+ * parsed after the defaults file, it takes precedence.
+ */
+ if (xtrabackup_innodb_force_recovery) {
+ srv_force_recovery = xtrabackup_innodb_force_recovery;
+ }
+
+ if (srv_force_recovery >= SRV_FORCE_IGNORE_CORRUPT) {
+ if (!xtrabackup_prepare) {
+ msg("mariabackup: The option \"innodb_force_recovery\""
+ " should only be used with \"%s\".",
+ (innobackupex_mode ? "--apply-log" : "--prepare"));
+ goto error;
+ } else {
+ msg("innodb_force_recovery = %lu", srv_force_recovery);
+ }
+ }
+
#ifdef _WIN32
srv_use_native_aio = TRUE;
#endif
@@ -4672,13 +4722,12 @@ fail_before_log_copying_thread_start:
log_file_op = NULL;
pthread_mutex_destroy(&backup_mutex);
pthread_cond_destroy(&scanned_lsn_cond);
- if (opt_log_innodb_page_corruption && !corrupted_pages.empty()) {
+ if (!corrupted_pages.empty()) {
+ ut_ad(opt_log_innodb_page_corruption);
msg("Error: corrupted innodb pages are found and logged to "
MB_CORRUPTED_PAGES_FILE " file");
- return false;
}
- else
- return(true);
+ return(true);
}
@@ -5776,6 +5825,10 @@ static bool xtrabackup_prepare_func(char** argv)
ut_ad(inc_dir_tables_hash);
}
+ msg("open files limit requested %u, set to %u",
+ (uint) xb_open_files_limit,
+ xb_set_max_open_files(xb_open_files_limit));
+
/* Fix DDL for prepare. Process .del,.ren, and .new files.
The order in which files are processed, is important
(see MDEV-18185, MDEV-18201)
@@ -6020,12 +6073,9 @@ static bool xtrabackup_prepare_func(char** argv)
srv_shutdown_bg_undo_sources();
srv_purge_shutdown();
buf_flush_sync_all_buf_pools();
- innodb_shutdown();
- innobase_space_shutdown();
}
- else
- innodb_shutdown();
+ innodb_shutdown();
innodb_free_param();
/* output to metadata file */
@@ -6447,8 +6497,9 @@ void handle_options(int argc, char **argv, char ***argv_server,
}
}
+ mariabackup_args.push_back(nullptr);
*argv_client= *argv_server= *argv_backup= &mariabackup_args[0];
- int argc_backup= static_cast<int>(mariabackup_args.size());
+ int argc_backup= static_cast<int>(mariabackup_args.size() - 1);
int argc_client= argc_backup;
int argc_server= argc_backup;
@@ -6611,6 +6662,8 @@ int main(int argc, char **argv)
char **client_defaults;
char **backup_defaults;
+ my_getopt_prefix_matching= 0;
+
if (get_exepath(mariabackup_exe,FN_REFLEN, argv[0]))
strncpy(mariabackup_exe,argv[0], FN_REFLEN-1);
@@ -6913,6 +6966,12 @@ static int get_exepath(char *buf, size_t size, const char *argv0)
ssize_t ret = readlink("/proc/self/exe", buf, size-1);
if(ret > 0)
return 0;
+#elif defined(__APPLE__)
+ size_t ret = proc_pidpath(getpid(), buf, static_cast<uint32_t>(size));
+ if (ret > 0) {
+ buf[ret] = 0;
+ return 0;
+ }
#endif
return my_realpath(buf, argv0, 0);
diff --git a/extra/mariabackup/xtrabackup.h b/extra/mariabackup/xtrabackup.h
index 4db1738e64d..bf5313d35db 100644
--- a/extra/mariabackup/xtrabackup.h
+++ b/extra/mariabackup/xtrabackup.h
@@ -174,6 +174,8 @@ enum binlog_info_enum { BINLOG_INFO_OFF, BINLOG_INFO_ON,
extern ulong opt_binlog_info;
+extern ulong xtrabackup_innodb_force_recovery;
+
void xtrabackup_io_throttling(void);
my_bool xb_write_delta_metadata(const char *filename,
const xb_delta_info_t *info);
diff --git a/extra/my_print_defaults.c b/extra/my_print_defaults.c
index 06cde330abb..9880d4d60d7 100644
--- a/extra/my_print_defaults.c
+++ b/extra/my_print_defaults.c
@@ -93,7 +93,7 @@ static void usage()
{
version();
puts("This software comes with ABSOLUTELY NO WARRANTY. This is free software,\nand you are welcome to modify and redistribute it under the GPL license\n");
- puts("Prints all arguments that is give to some program using the default files");
+ puts("Displays the options from option groups of option files, which is useful to see which options a particular tool will use");
printf("Usage: %s [OPTIONS] [groups]\n", my_progname);
my_print_help(my_long_options);
my_print_default_files(config_file);
diff --git a/extra/wolfssl/CMakeLists.txt b/extra/wolfssl/CMakeLists.txt
index 953d377ebcf..c99fb155dd6 100644
--- a/extra/wolfssl/CMakeLists.txt
+++ b/extra/wolfssl/CMakeLists.txt
@@ -9,20 +9,32 @@ ENDIF()
IF(CMAKE_SIZEOF_VOID_P MATCHES 8)
IF(MSVC)
SET(WOLFSSL_INTELASM ON)
+ SET(WOLFSSL_X86_64_BUILD 1)
+ SET(HAVE_INTEL_RDSEED 1)
+ SET(HAVE_INTEL_RDRAND 1)
ELSEIF(CMAKE_SYSTEM_PROCESSOR MATCHES "x86_64|amd64")
+ SET(WOLFSSL_X86_64_BUILD 1)
IF(CMAKE_C_COMPILER_ID MATCHES GNU AND CMAKE_C_COMPILER_VERSION VERSION_LESS 4.9)
MESSAGE_ONCE(NO_INTEL_ASSEMBLY "Disable Intel assembly for WolfSSL - compiler is too old")
- ELSEIF(WITH_MSAN)
- MESSAGE_ONCE(MSAN_CANT_HANDLE_IT
- "Disable Intel assembly for WolfSSL - MSAN can't handle it")
ELSE()
- MY_CHECK_C_COMPILER_FLAG(-maes)
- MY_CHECK_C_COMPILER_FLAG(-msse4)
- MY_CHECK_C_COMPILER_FLAG(-mpclmul)
+ IF(WITH_MSAN)
+ MESSAGE_ONCE(MSAN_CANT_HANDLE_IT
+ "Disable Intel assembly for WolfSSL - MSAN can't handle it")
+ ELSE()
+ MY_CHECK_C_COMPILER_FLAG(-maes)
+ MY_CHECK_C_COMPILER_FLAG(-msse4)
+ MY_CHECK_C_COMPILER_FLAG(-mpclmul)
+ IF(have_C__maes AND have_C__msse4 AND have_C__mpclmul)
+ SET(WOLFSSL_INTELASM ON)
+ ENDIF()
+ ENDIF()
MY_CHECK_C_COMPILER_FLAG(-mrdrnd)
MY_CHECK_C_COMPILER_FLAG(-mrdseed)
- IF(have_C__maes AND have_C__msse4 AND have_C__mpclmul)
- SET(WOLFSSL_INTELASM ON)
+ IF(have_C__mrdrnd)
+ SET(HAVE_INTEL_RDRAND ON)
+ ENDIF()
+ IF(have_C__mrdseed)
+ SET(HAVE_INTEL_RDSEED ON)
ENDIF()
ENDIF()
ENDIF()
@@ -44,7 +56,7 @@ ADD_DEFINITIONS(-DWOLFSSL_LIB -DBUILDING_WOLFSSL)
INCLUDE_DIRECTORIES(BEFORE ${CMAKE_CURRENT_SOURCE_DIR}/wolfssl)
IF(MSVC)
# size_t to long truncation warning
- SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -wd4267 -wd4334 -wd4028")
+ SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -wd4267 -wd4334 -wd4028 -wd4244")
ENDIF()
ADD_CONVENIENCE_LIBRARY(wolfssl ${WOLFSSL_SOURCES})
@@ -109,33 +121,22 @@ ELSE()
SET(WOLFCRYPT_SOURCES ${WOLFCRYPT_SOURCES} ${WOLFCRYPT_SRCDIR}/integer.c)
ENDIF()
-IF(WOLFSSL_INTELASM)
- SET(WOLFSSL_AESNI 1)
-
+IF(WOLFSSL_X86_64_BUILD)
LIST(APPEND WOLFCRYPT_SOURCES ${WOLFCRYPT_SRCDIR}/cpuid.c)
IF(MSVC)
+ SET(WOLFSSL_AESNI 1)
LIST(APPEND WOLFCRYPT_SOURCES ${WOLFCRYPT_SRCDIR}/aes_asm.asm)
- SET(WOLFSSL_X86_64_BUILD 1)
- SET(HAVE_INTEL_RDSEED 1)
- SET(HAVE_INTEL_RDRAND 1)
IF(CMAKE_C_COMPILER_ID MATCHES Clang)
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -maes -msse4.2 -mpclmul -mrdrnd -mrdseed")
ENDIF()
- ELSEIF(CMAKE_SYSTEM_PROCESSOR MATCHES "x86_64|amd64")
+ ELSEIF(WOLFSSL_INTELASM)
+ SET(WOLFSSL_AESNI 1)
SET(USE_INTEL_SPEEDUP 1)
LIST(APPEND WOLFCRYPT_SOURCES
- ${WOLFCRYPT_SRCDIR}/aes_asm.S
- ${WOLFCRYPT_SRCDIR}/sha512_asm.S
- ${WOLFCRYPT_SRCDIR}/sha256_asm.S)
- ADD_DEFINITIONS(-maes -msse4 -mpclmul)
- IF(have_C__mrdrnd)
- SET(HAVE_INTEL_RDRAND 1)
- ADD_DEFINITIONS(-mrdrnd)
- ENDIF()
- IF(have_C__mrdseed)
- SET(HAVE_INTEL_RDSEED 1)
- ADD_DEFINITIONS(-mrdseed)
- ENDIF()
+ ${WOLFCRYPT_SRCDIR}/aes_asm.S
+ ${WOLFCRYPT_SRCDIR}/sha512_asm.S
+ ${WOLFCRYPT_SRCDIR}/sha256_asm.S)
+ ADD_DEFINITIONS(-maes -msse4.2 -mpclmul)
ENDIF()
ENDIF()
diff --git a/extra/wolfssl/wolfssl b/extra/wolfssl/wolfssl
-Subproject e116c89a58af750421d82ece13f80516d2bde02
+Subproject 9c87f979a7f1d3a6d786b260653d566c1d31a1c
diff --git a/include/byte_order_generic.h b/include/byte_order_generic.h
index 8381941b9b9..d2b729a241d 100644
--- a/include/byte_order_generic.h
+++ b/include/byte_order_generic.h
@@ -28,10 +28,10 @@
(((uint32) (uchar) (A)[2]) << 16) |\
(((uint32) (uchar) (A)[1]) << 8) | \
((uint32) (uchar) (A)[0])))
-#define sint4korr(A) (int32) (((int32) ((uchar) (A)[0])) |\
- (((int32) ((uchar) (A)[1]) << 8)) |\
- (((int32) ((uchar) (A)[2]) << 16)) |\
- (((int32) ((int16) (A)[3]) << 24)))
+#define sint4korr(A) (int32) (((uint32) ((uchar) (A)[0])) |\
+ (((uint32) ((uchar) (A)[1]) << 8)) |\
+ (((uint32) ((uchar) (A)[2]) << 16)) |\
+ (((uint32) ((uchar) (A)[3]) << 24)))
#define sint8korr(A) (longlong) uint8korr(A)
#define uint2korr(A) (uint16) (((uint16) ((uchar) (A)[0])) |\
((uint16) ((uchar) (A)[1]) << 8))
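
The sint4korr rewrite above (and the matching mi_sint4korr change in myisampack.h further
down) performs every shift on uint32 because left-shifting a signed value into the sign bit
is undefined behaviour, which is exactly what -fsanitize=undefined reports. A minimal
standalone version of the safe pattern; the function name is illustrative and not part of
the server headers:

#include <stdint.h>
#include <stdio.h>

/* Decode a little-endian signed 32-bit value from 4 bytes. All shifting is
   done on an unsigned type; only the final conversion back to int32_t is
   signed, so no undefined behaviour is invoked. */
static int32_t le32_to_int32(const unsigned char *p)
{
  uint32_t u = (uint32_t) p[0]
             | ((uint32_t) p[1] << 8)
             | ((uint32_t) p[2] << 16)
             | ((uint32_t) p[3] << 24);
  return (int32_t) u;   /* implementation-defined, but two's complement on supported targets */
}

int main(void)
{
  const unsigned char bytes[4] = { 0xFE, 0xFF, 0xFF, 0xFF };  /* -2 in little-endian */
  printf("%d\n", le32_to_int32(bytes));                       /* prints -2 */
  return 0;
}
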
diff --git a/include/byte_order_generic_x86.h b/include/byte_order_generic_x86.h
index 72e00be8c2c..c47564478c6 100644
--- a/include/byte_order_generic_x86.h
+++ b/include/byte_order_generic_x86.h
@@ -17,6 +17,7 @@
/*
Optimized function-like macros for the x86 architecture (_WIN32 included).
*/
+
#define sint2korr(A) (*((const int16 *) (A)))
#define sint3korr(A) ((int32) ((((uchar) (A)[2]) & 128) ? \
(((uint32) 255L << 24) | \
diff --git a/include/json_lib.h b/include/json_lib.h
index b6add6d13a3..bb649928eaa 100644
--- a/include/json_lib.h
+++ b/include/json_lib.h
@@ -172,6 +172,7 @@ enum json_states {
enum json_value_types
{
+ JSON_VALUE_UNINITALIZED=0,
JSON_VALUE_OBJECT=1,
JSON_VALUE_ARRAY=2,
JSON_VALUE_STRING=3,
diff --git a/include/my_base.h b/include/my_base.h
index 44af7b45075..767da14b4d6 100644
--- a/include/my_base.h
+++ b/include/my_base.h
@@ -1,5 +1,5 @@
/* Copyright (c) 2000, 2012, Oracle and/or its affiliates.
- Copyright (c) 1995, 2018, MariaDB Corporation.
+ Copyright (c) 1995, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -213,9 +213,7 @@ enum ha_extra_function {
/** Start writing rows during ALTER TABLE...ALGORITHM=COPY. */
HA_EXTRA_BEGIN_ALTER_COPY,
/** Finish writing rows during ALTER TABLE...ALGORITHM=COPY. */
- HA_EXTRA_END_ALTER_COPY,
- /** Fake the start of a statement after wsrep_load_data_splitting hack */
- HA_EXTRA_FAKE_START_STMT
+ HA_EXTRA_END_ALTER_COPY
};
/* Compatible option, to be deleted in 6.0 */
diff --git a/include/my_byteorder.h b/include/my_byteorder.h
index c302781d9fc..abdf19a3632 100644
--- a/include/my_byteorder.h
+++ b/include/my_byteorder.h
@@ -31,10 +31,10 @@
format (low byte first). There are 'korr' (assume 'corrector') variants
for integer types, but 'get' (assume 'getter') for floating point types.
*/
-#if defined(__i386__) || defined(_WIN32)
+#if (defined(__i386__) || defined(_WIN32)) && !defined(WITH_UBSAN)
#define MY_BYTE_ORDER_ARCH_OPTIMIZED
#include "byte_order_generic_x86.h"
-#elif defined(__x86_64__)
+#elif defined(__x86_64__) && !defined(WITH_UBSAN)
#include "byte_order_generic_x86_64.h"
#else
#include "byte_order_generic.h"
diff --git a/include/my_compare.h b/include/my_compare.h
index 9ae6c9582fb..bd5dc418f8c 100644
--- a/include/my_compare.h
+++ b/include/my_compare.h
@@ -1,5 +1,5 @@
/* Copyright (c) 2011, Oracle and/or its affiliates.
- Copyright (c) Monty Program Ab; 1991-2011
+ Copyright (c) 1991, 2020, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -95,15 +95,16 @@ static inline uchar get_rec_bits(const uchar *ptr, uchar ofs, uint len)
{
uint16 val= ptr[0];
if (ofs + len > 8)
- val|= (uint16)(ptr[1]) << 8;
- return (val >> ofs) & ((1 << len) - 1);
+ val|= (uint16)(((uint) ptr[1]) << 8);
+ return (uchar) ((val >> ofs) & ((1 << len) - 1));
}
static inline void set_rec_bits(uint16 bits, uchar *ptr, uchar ofs, uint len)
{
- ptr[0]= (ptr[0] & ~(((1 << len) - 1) << ofs)) | (bits << ofs);
+ ptr[0]= (uchar) ((ptr[0] & ~(((1 << len) - 1) << ofs)) | (bits << ofs));
if (ofs + len > 8)
- ptr[1]= (ptr[1] & ~((1 << (len - 8 + ofs)) - 1)) | (bits >> (8 - ofs));
+ ptr[1]= (uchar) ((ptr[1] & ~((1 << (len - 8 + ofs)) - 1)) |
+ bits >> (8 - ofs));
}
#define clr_rec_bits(bit_ptr, bit_ofs, bit_len) \
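
The get_rec_bits()/set_rec_bits() hunks only add explicit casts to silence implicit-conversion (and sanitizer) complaints; the bit layout itself is unchanged. A small stand-alone sketch, illustrative values only, of what the getter computes:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative only: a field of `len` bits stored at bit offset `ofs`
       may straddle two bytes, so the second byte is merged in before the
       shift-and-mask. */
    static unsigned char get_bits(const unsigned char *ptr, unsigned ofs, unsigned len)
    {
      uint16_t val = ptr[0];
      if (ofs + len > 8)
        val |= (uint16_t) ((unsigned) ptr[1] << 8);
      return (unsigned char) ((val >> ofs) & ((1U << len) - 1));
    }

    int main(void)
    {
      const unsigned char rec[2] = { 0xC0, 0x03 };  /* bits 6..9 set */
      printf("%u\n", get_bits(rec, 6, 4));          /* prints 15 */
      return 0;
    }
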
diff --git a/include/my_pthread.h b/include/my_pthread.h
index 81dd63ee331..bc47bb8bad0 100644
--- a/include/my_pthread.h
+++ b/include/my_pthread.h
@@ -197,7 +197,6 @@ static inline int my_sigwait(sigset_t *set, int *sig, int *code)
*code= siginfo.si_code;
return *sig < 0 ? errno : 0;
#else
-#define SI_KERNEL 128
*code= 0;
return sigwait(set, sig);
#endif
diff --git a/include/myisampack.h b/include/myisampack.h
index 6bfe1958fbc..f3c5fe7114a 100644
--- a/include/myisampack.h
+++ b/include/myisampack.h
@@ -30,7 +30,7 @@
#define mi_uint1korr(A) ((uint8)(*A))
#define mi_sint2korr(A) ((int16) (((int16) (((const uchar*) (A))[1])) |\
- ((int16) ((int16) ((const char*) (A))[0]) << 8)))
+ ((int16) ((uint16) ((const uchar*) (A))[0]) << 8)))
#define mi_sint3korr(A) ((int32) (((((const uchar*) (A))[0]) & 128) ? \
(((uint32) 255L << 24) | \
(((uint32) ((const uchar*) (A))[0]) << 16) |\
@@ -39,10 +39,10 @@
(((uint32) ((const uchar*) (A))[0]) << 16) |\
(((uint32) ((const uchar*) (A))[1]) << 8) | \
((uint32) ((const uchar*) (A))[2])))
-#define mi_sint4korr(A) ((int32) (((int32) (((const uchar*) (A))[3])) |\
- ((int32) (((const uchar*) (A))[2]) << 8) |\
- ((int32) (((const uchar*) (A))[1]) << 16) |\
- ((int32) ((int16) ((const char*) (A))[0]) << 24)))
+#define mi_sint4korr(A) ((int32) (((uint32) (((const uchar*) (A))[3])) |\
+ ((uint32) (((const uchar*) (A))[2]) << 8) |\
+ ((uint32) (((const uchar*) (A))[1]) << 16) |\
+ ((uint32) (((const uchar*) (A))[0]) << 24)))
#define mi_sint8korr(A) ((longlong) mi_uint8korr(A))
#define mi_uint2korr(A) ((uint16) (((uint16) (((const uchar*) (A))[1])) |\
((uint16) (((const uchar*) (A))[0]) << 8)))
diff --git a/include/mysql/plugin_ftparser.h b/include/mysql/plugin_ftparser.h
index 99bb6b24f3f..8db8712926f 100644
--- a/include/mysql/plugin_ftparser.h
+++ b/include/mysql/plugin_ftparser.h
@@ -158,7 +158,9 @@ typedef struct st_mysql_ftparser_boolean_info
the word to MySQL full-text index. When parsing a search query,
this function will add the new word to the list of words to search
for. The boolean_info argument can be NULL for all cases except
- when mode is MYSQL_FTPARSER_FULL_BOOLEAN_INFO.
+ when mode is MYSQL_FTPARSER_FULL_BOOLEAN_INFO. A plugin can replace this
+ callback to post-process every parsed word before passing it to the original
+ mysql_add_word function.
ftparser_state: A generic pointer. The plugin can set it to point
to information to be used internally for its own purposes.
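
The amended mysql_add_word comment above describes the usual parser-plugin pattern: the plugin tokenizes param->doc itself and forwards each (possibly post-processed) word to the server through param->mysql_add_word. A rough sketch of that pattern; field names follow plugin_ftparser.h, but the exact signatures should be treated as assumptions rather than verified against this patch:

    #include <ctype.h>
    #include <mysql/plugin_ftparser.h>

    /* Rough sketch only: split the document on non-alphanumerics and hand
       each word to the server's add-word callback.  boolean_info (the last
       argument) may be NULL except in MYSQL_FTPARSER_FULL_BOOLEAN_INFO mode. */
    static int sketch_parse(MYSQL_FTPARSER_PARAM *param)
    {
      const char *doc= param->doc;
      const char *end= doc + param->length;
      while (doc < end)
      {
        const char *start;
        while (doc < end && !isalnum((unsigned char) *doc))
          doc++;
        start= doc;
        while (doc < end && isalnum((unsigned char) *doc))
          doc++;
        if (doc > start)
          param->mysql_add_word(param, (char *) start, (int) (doc - start), NULL);
      }
      return 0;
    }
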
diff --git a/include/mysql/service_wsrep.h b/include/mysql/service_wsrep.h
index a040e40ed3c..10c70c790e9 100644
--- a/include/mysql/service_wsrep.h
+++ b/include/mysql/service_wsrep.h
@@ -62,7 +62,6 @@ extern struct wsrep_service_st {
int (*wsrep_thd_retry_counter_func)(const MYSQL_THD thd);
bool (*wsrep_thd_ignore_table_func)(MYSQL_THD thd);
long long (*wsrep_thd_trx_seqno_func)(const MYSQL_THD thd);
- void (*wsrep_thd_auto_increment_variables_func)(THD *thd, unsigned long long *offset, unsigned long long *increment);
my_bool (*wsrep_thd_is_aborting_func)(const MYSQL_THD thd);
void (*wsrep_set_data_home_dir_func)(const char *data_dir);
my_bool (*wsrep_thd_is_BF_func)(const MYSQL_THD thd, my_bool sync);
@@ -87,6 +86,8 @@ extern struct wsrep_service_st {
bool (*wsrep_thd_set_wsrep_aborter_func)(MYSQL_THD bf_thd, MYSQL_THD thd);
void (*wsrep_report_bf_lock_wait_func)(const MYSQL_THD thd,
unsigned long long trx_id);
+ void (*wsrep_thd_kill_LOCK_func)(const MYSQL_THD thd);
+ void (*wsrep_thd_kill_UNLOCK_func)(const MYSQL_THD thd);
} *wsrep_service;
#define MYSQL_SERVICE_WSREP_INCLUDED
@@ -104,11 +105,12 @@ extern struct wsrep_service_st {
#define wsrep_prepare_key_for_innodb(A,B,C,D,E,F,G) wsrep_service->wsrep_prepare_key_for_innodb_func(A,B,C,D,E,F,G)
#define wsrep_thd_LOCK(T) wsrep_service->wsrep_thd_LOCK_func(T)
#define wsrep_thd_UNLOCK(T) wsrep_service->wsrep_thd_UNLOCK_func(T)
+#define wsrep_thd_kill_LOCK(T) wsrep_service->wsrep_thd_kill_LOCK_func(T)
+#define wsrep_thd_kill_UNLOCK(T) wsrep_service->wsrep_thd_kill_UNLOCK_func(T)
#define wsrep_thd_query(T) wsrep_service->wsrep_thd_query_func(T)
#define wsrep_thd_retry_counter(T) wsrep_service->wsrep_thd_retry_counter_func(T)
#define wsrep_thd_ignore_table(T) wsrep_service->wsrep_thd_ignore_table_func(T)
#define wsrep_thd_trx_seqno(T) wsrep_service->wsrep_thd_trx_seqno_func(T)
-#define wsrep_thd_auto_increment_variables(T,O,I) wsrep_service->wsrep_thd_auto_increment_variables_func(T,O,I)
#define wsrep_set_data_home_dir(A) wsrep_service->wsrep_set_data_home_dir_func(A)
#define wsrep_thd_is_BF(T,S) wsrep_service->wsrep_thd_is_BF_func(T,S)
#define wsrep_thd_is_aborting(T) wsrep_service->wsrep_thd_is_aborting_func(T)
@@ -148,7 +150,6 @@ extern "C" long long wsrep_xid_seqno(const struct xid_t* xid);
const unsigned char* wsrep_xid_uuid(const struct xid_t* xid);
extern "C" long long wsrep_thd_trx_seqno(const MYSQL_THD thd);
my_bool get_wsrep_recovery();
-void wsrep_thd_auto_increment_variables(THD *thd, unsigned long long *offset, unsigned long long *increment);
bool wsrep_thd_ignore_table(MYSQL_THD thd);
void wsrep_set_data_home_dir(const char *data_dir);
@@ -164,6 +165,9 @@ extern "C" void wsrep_thd_LOCK(const MYSQL_THD thd);
/* Unlock thd wsrep lock */
extern "C" void wsrep_thd_UNLOCK(const MYSQL_THD thd);
+extern "C" void wsrep_thd_kill_LOCK(const MYSQL_THD thd);
+extern "C" void wsrep_thd_kill_UNLOCK(const MYSQL_THD thd);
+
/* Return thd client state string */
extern "C" const char* wsrep_thd_client_state_str(const MYSQL_THD thd);
/* Return thd client mode string */
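
The new wsrep_thd_kill_LOCK()/wsrep_thd_kill_UNLOCK() service calls added above presumably bracket access to a victim THD while a kill or BF-abort decision is made; the exact locking contract is not visible in this diff. A heavily hedged caller-side sketch using only the declarations introduced here:

    #include <mysql/service_wsrep.h>

    /* Hypothetical usage only: assumes the kill_LOCK/kill_UNLOCK pair keeps
       the victim THD valid while it is inspected, mirroring the existing
       wsrep_thd_LOCK()/wsrep_thd_UNLOCK() pattern. */
    static void sketch_abort_victim(MYSQL_THD victim)
    {
      wsrep_thd_kill_LOCK(victim);
      /* ... e.g. check wsrep_thd_is_aborting(victim) and signal the kill ... */
      wsrep_thd_kill_UNLOCK(victim);
    }
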
diff --git a/include/mysql_com.h b/include/mysql_com.h
index 9e5215f29b3..65f686f3063 100644
--- a/include/mysql_com.h
+++ b/include/mysql_com.h
@@ -26,7 +26,7 @@
#define HOSTNAME_LENGTH 60
#define SYSTEM_CHARSET_MBMAXLEN 3
#define NAME_CHAR_LEN 64U /* Field/table name length */
-#define USERNAME_CHAR_LENGTH 128U
+#define USERNAME_CHAR_LENGTH 128
#define NAME_LEN (NAME_CHAR_LEN*SYSTEM_CHARSET_MBMAXLEN)
#define USERNAME_LENGTH (USERNAME_CHAR_LENGTH*SYSTEM_CHARSET_MBMAXLEN)
#define DEFINER_CHAR_LENGTH (USERNAME_CHAR_LENGTH + HOSTNAME_LENGTH + 1)
diff --git a/include/service_versions.h b/include/service_versions.h
index 16d21ac40d3..fb7e715f858 100644
--- a/include/service_versions.h
+++ b/include/service_versions.h
@@ -1,5 +1,5 @@
/* Copyright (c) 2009, 2010, Oracle and/or its affiliates.
- Copyright (c) 2012, 2017, MariaDB
+ Copyright (c) 2012, 2021, MariaDB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -41,5 +41,5 @@
#define VERSION_thd_specifics 0x0100
#define VERSION_thd_timezone 0x0100
#define VERSION_thd_wait 0x0100
-#define VERSION_wsrep 0x0202
+#define VERSION_wsrep 0x0400
#define VERSION_json 0x0100
diff --git a/include/ssl_compat.h b/include/ssl_compat.h
index 8cc0e6a9a2b..9f4b6be8d95 100644
--- a/include/ssl_compat.h
+++ b/include/ssl_compat.h
@@ -1,5 +1,5 @@
/*
- Copyright (c) 2016, 2017 MariaDB Corporation
+ Copyright (c) 2016, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -73,19 +73,19 @@
#define EVP_MD_CTX_SIZE sizeof(EVP_MD_CTX)
#endif
-#define OPENSSL_init_ssl(X,Y) SSL_library_init()
#define DH_set0_pqg(D,P,Q,G) ((D)->p= (P), (D)->g= (G))
#define EVP_CIPHER_CTX_buf_noconst(ctx) ((ctx)->buf)
#define EVP_CIPHER_CTX_encrypting(ctx) ((ctx)->encrypt)
#define EVP_CIPHER_CTX_SIZE sizeof(EVP_CIPHER_CTX)
#ifndef HAVE_WOLFSSL
+#define OPENSSL_init_ssl(X,Y) SSL_library_init()
#define EVP_MD_CTX_reset(X) EVP_MD_CTX_cleanup(X)
#define EVP_CIPHER_CTX_reset(X) EVP_CIPHER_CTX_cleanup(X)
-#endif
#define X509_get0_notBefore(X) X509_get_notBefore(X)
#define X509_get0_notAfter(X) X509_get_notAfter(X)
#endif
+#endif
#ifndef TLS1_3_VERSION
#define SSL_CTX_set_ciphersuites(X,Y) 0
diff --git a/libmariadb b/libmariadb
-Subproject e38244220646a7e95c9be22576460aa7a4eb715
+Subproject d19c7c69269fdf4e2af8943dd86c12e4e1664af
diff --git a/libmysqld/lib_sql.cc b/libmysqld/lib_sql.cc
index 9fd6eb7805a..42a93f9a38b 100644
--- a/libmysqld/lib_sql.cc
+++ b/libmysqld/lib_sql.cc
@@ -625,7 +625,8 @@ int init_embedded_server(int argc, char **argv, char **groups)
(void) thr_setconcurrency(concurrency); // 10 by default
- start_handle_manager();
+ if (flush_time && flush_time != ~(ulong) 0L)
+ start_handle_manager();
// FIXME initialize binlog_filter and rpl_filter if not already done
// corresponding delete is in clean_up()
diff --git a/man/mysqlbinlog.1 b/man/mysqlbinlog.1
index 4e02226106b..675f51f7a8c 100644
--- a/man/mysqlbinlog.1
+++ b/man/mysqlbinlog.1
@@ -1,6 +1,6 @@
'\" t
.\"
-.TH "\FBMYSQLBINLOG\FR" "1" "28 March 2019" "MariaDB 10\&.4" "MariaDB Database System"
+.TH "\FBMYSQLBINLOG\FR" "1" "14 April 2021" "MariaDB 10\&.4" "MariaDB Database System"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
@@ -1091,6 +1091,23 @@ This option is useful for point\-in\-time recovery\&.
.sp -1
.IP \(bu 2.3
.\}
+.\" mysqlbinlog: table option
+.\" table option: mysqlbinlog
+\fB\-\-table\fR,
+\fB\-T\fR
+.sp
+List entries for just this table (local log only)\&.
+.RE
+.sp
+.RS 4
+.ie n \{\
+\h'-04'\(bu\h'+03'\c
+.\}
+.el \{\
+.sp -1
+.IP \(bu 2.3
+.\}
+
.\" mysqlbinlog: to-last-log option
.\" to-last-log option: mysqlbinlog
\fB\-\-to\-last\-log\fR,
@@ -2107,7 +2124,7 @@ option can be used to prevent this header from being written\&.
.SH "COPYRIGHT"
.br
.PP
-Copyright 2007-2008 MySQL AB, 2008-2010 Sun Microsystems, Inc., 2010-2015 MariaDB Foundation
+Copyright 2007-2008 MySQL AB, 2008-2010 Sun Microsystems, Inc., 2010-2021 MariaDB Foundation
.PP
This documentation is free software; you can redistribute it and/or modify it only under the terms of the GNU General Public License as published by the Free Software Foundation; version 2 of the License.
.PP
diff --git a/man/mysqldump.1 b/man/mysqldump.1
index 705a3cdd7ad..192fd6f8597 100644
--- a/man/mysqldump.1
+++ b/man/mysqldump.1
@@ -2261,7 +2261,7 @@ servers \- remote (federated) servers as \fBCREATE SERVER\fR\&.
.sp -1
.IP \(bu 2.3
.\}
-stats \- statistics tables, InnoDB and Engine Independent Table Statistics (EITS), are dumped as \fBINSERT\fR/\fBREPLACE INFO\fR statements without (re)creating tables\&.
+stats \- statistics tables, InnoDB and Engine Independent Table Statistics (EITS), are dumped as \fBREPLACE INTO\fR (or \fBINSERT IGNORE\fR if \fB\-\-insert\-into\fR is specified) statements without (re)creating tables\&.
.RE
.RS 4
.ie n \{\
@@ -2271,7 +2271,7 @@ stats \- statistics tables, InnoDB and Engine Independent Table Statistics (EITS
.sp -1
.IP \(bu 2.3
.\}
-timezones \- timezone related system tables dumped as \fBINSERT\fR/\fBREPLACE INTO\fR statements without (re)creating tables\&.
+timezones \- timezone related system tables dumped as \fBREPLACE INTO\fR (or \fBINSERT IGNORE\fR if \fB\-\-insert\-into\fR is specified) statements without (re)creating tables\&.
.RE
.sp
The format of the output is affected by \fB\-\-replace\fR and \fB\-\-insert\-into\fR\&. The \fB\-\-replace\fR option will output \fBCREATE OR REPLACE\fR
@@ -2281,12 +2281,11 @@ With \fB\-\-system=user\fR (or \fBall\fR), and \fB\-\-replace\fR, SQL is generat
.sp
The \fB\-\-insert\-into\fR option will cause \fBCREATE IF NOT EXIST\fR forms of SQL to generated if available.
.sp
-For stats, and timezones, \fB\-\-replace\fR and \fB\-\-insert\-info\fR have the usual effects.
+For stats, and timezones, \fB\-\-replace\fR and \fB\-\-insert\-into\fR have the usual effects.
.sp
Enabling specific options here will cause the relevant tables in the mysql database to be ignored when dumping the mysql database or \fB\-\-all\-databases\fR\&.
.sp
-Experimentally this option is designed to be able to dump system information from MySQL-5\&.7 and 8\&.0 servers\&. SQL generated is also
-experimentally compatible with MySQL-5\&.7/8\&.0\&. Mappings of implemenation specific grants/plugins isn't always one-to-one however\&.
+To help in migrating from MySQL to MariaDB, this option is designed to be able to dump system information from MySQL-5\&.7 and 8\&.0 servers\&. The SQL generated is also experimentally compatible with MySQL-5\&.7/8\&.0\&. Mappings of implementation-specific grants/plugins between MariaDB and MySQL are not always one-to-one, however, and will require manual changes\&.
.sp
.RE
.RS 4
diff --git a/mysql-test/collections/smoke_test b/mysql-test/collections/smoke_test
new file mode 100644
index 00000000000..6a9df661c4f
--- /dev/null
+++ b/mysql-test/collections/smoke_test
@@ -0,0 +1,146 @@
+archive.archive
+archive.archive_gis
+archive.partition_archive
+binlog.binlog_base64_flag
+binlog.binlog_database
+binlog.binlog_innodb
+binlog.binlog_parallel_replication_marks_stm_mix
+binlog.binlog_row_mix_innodb_myisam
+binlog.flashback
+binlog_encryption.encrypted_slave
+connect.bin
+connect.bson
+connect.dbf
+connect.dir
+connect.endian
+connect.general
+connect.json
+connect.mysql
+connect.secure_file_priv
+connect.tbl
+csv.csv
+disks.disks
+encryption.encryption_force
+encryption.innodb_encryption_tables
+encryption.tempfiles_encrypted
+federated.federated
+federated.federatedx
+gcol.gcol_select_innodb
+handler.interface
+heap.heap
+innodb.innodb
+innodb.autoinc_persist
+innodb.innodb_defrag_binlog
+innodb.innodb_mysql
+innodb.monitor
+innodb.purge
+innodb.table_flags
+innodb.xa_recovery
+innodb_fts.fulltext
+innodb_gis.geometry
+innodb_gis.rtree
+innodb_zip.innodb-zip
+innodb_zip.page_size
+json.json_no_table
+main.blackhole
+main.bootstrap
+main.compress
+main.connect
+main.ctype_collate
+main.ctype_utf8
+main.default
+main.dyncol
+main.fulltext
+main.function_defaults
+main.gis
+main.grant
+main.handlersocket
+main.information_schema
+main.innodb_ext_key
+main.log_tables
+main.lowercase_fs_off
+main.myisam
+main.mysql_client_test
+main.mysql_protocols
+main.mysql_upgrade
+main.mysqladmin
+main.mysqlbinlog
+main.mysqlcheck
+main.mysqld--defaults-file
+main.mysqldump
+main.mysqlhotcopy_myisam
+main.mysqlshow
+main.mysqlslap
+main.mysqltest
+main.parser
+main.partition
+main.perror
+main.plugin
+main.plugin_auth
+main.pool_of_threads
+main.ps
+main.repair
+main.shutdown
+main.sp
+main.ssl
+main.ssl_compress
+main.stat_tables
+main.statistics
+main.subselect
+main.symlink
+main.temp_table
+main.timezone
+main.type_timestamp_hires
+main.user_var
+main.userstat
+main.variables
+main.view
+main.win
+main.xa
+maria.maria
+mariabackup.full_backup
+metadata_lock_info.table_metadata_lock
+mroonga/storage.variable_version
+mroonga/wrapper.count_star
+multi_source.multisource
+oqgraph.general-innodb
+parts.rpl_partition
+perfschema.selects
+plugins.auth_ed25519
+plugins.cracklib_password_check
+plugins.dialog
+plugins.fulltext_plugin
+plugins.locales
+plugins.pam_cleartext
+plugins.processlist
+plugins.qc_info
+plugins.server_audit
+plugins.simple_password_check
+plugins.sql_error_log
+plugins.two_password_validations
+plugins.unix_socket
+query_response_time.basic
+rocksdb.rocksdb
+roles.definer
+rpl.rpl_gtid_basic
+rpl.rpl_relayrotate
+rpl.rpl_row_blob_innodb
+rpl.rpl_semi_sync_event
+rpl.rpl_sp
+rpl.rpl_stm_binlog_max_cache_size
+rpl.rpl_switch_stm_row_mixed
+sequence.simple
+spider.basic_sql
+spider.ha
+sql_discovery.simple
+sys_vars.sysvars_aria
+sys_vars.sysvars_server_notembedded
+wsrep.variables
+compat/oracle.binlog_ptr_mysqlbinlog
+compat/oracle.sp-package
+compat/maxdb.rpl_mariadb_timestamp
+sql_sequence.mysqldump
+versioning.simple
+versioning.trx_id
+period.versioning
+plugins.multiauth
diff --git a/mysql-test/include/ctype_utf8mb4.inc b/mysql-test/include/ctype_utf8mb4.inc
index 10d4f99efba..d24ee2fafeb 100644
--- a/mysql-test/include/ctype_utf8mb4.inc
+++ b/mysql-test/include/ctype_utf8mb4.inc
@@ -1587,7 +1587,7 @@ drop table t1;
--echo #
--echo # Check strnxfrm() with odd length
--echo #
-set max_sort_length=9;
+set max_sort_length=65;
select @@max_sort_length;
eval create table t1 (a varchar(128) character set utf8mb4 collate utf8mb4_general_ci) engine $engine;
insert into t1 values ('a'),('b'),('c');
diff --git a/mysql-test/include/galera_no_debug_sync.inc b/mysql-test/include/galera_no_debug_sync.inc
new file mode 100644
index 00000000000..0819144c0b6
--- /dev/null
+++ b/mysql-test/include/galera_no_debug_sync.inc
@@ -0,0 +1,9 @@
+--disable_query_log
+
+--let $galera_have_debug_sync = `SELECT 1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_debug_sync_waiters'`
+
+--if ($galera_have_debug_sync) {
+ --skip Test requires Galera debug library with no debug_sync functionality
+}
+
+--enable_query_log
diff --git a/mysql-test/include/icp_tests.inc b/mysql-test/include/icp_tests.inc
index aa09b0025d4..66fdc3e754c 100644
--- a/mysql-test/include/icp_tests.inc
+++ b/mysql-test/include/icp_tests.inc
@@ -484,11 +484,11 @@ CREATE TABLE t1 (
INSERT INTO t1 VALUES (1,9),(2,7),(3,6),(4,3),(5,1);
-EXPLAIN SELECT pk, c1 FROM t1 WHERE pk <> 3;
+EXPLAIN SELECT pk, c1 FROM t1 WHERE (pk<3 or pk>3);
SET SESSION optimizer_switch='index_condition_pushdown=off';
-SELECT pk, c1 FROM t1 WHERE pk <> 3;
+SELECT pk, c1 FROM t1 WHERE (pk<3 or pk>3);
DROP TABLE t1;
@@ -727,16 +727,16 @@ INSERT INTO t2 VALUES
SET SESSION optimizer_switch='index_condition_pushdown=off';
EXPLAIN
-SELECT t1.b, t1.c FROM t1, t2 WHERE t1.a = t2.a AND t1.b != 0
+SELECT t1.b, t1.c FROM t1, t2 WHERE t1.a = t2.a AND (t1.b<0 OR t1.b>0)
HAVING t1.c != 5 ORDER BY t1.c;
-SELECT t1.b, t1.c FROM t1, t2 WHERE t1.a = t2.a AND t1.b != 0
+SELECT t1.b, t1.c FROM t1, t2 WHERE t1.a = t2.a AND (t1.b<0 OR t1.b>0)
HAVING t1.c != 5 ORDER BY t1.c;
SET SESSION optimizer_switch='index_condition_pushdown=on';
EXPLAIN
-SELECT t1.b, t1.c FROM t1, t2 WHERE t1.a = t2.a AND t1.b != 0
+SELECT t1.b, t1.c FROM t1, t2 WHERE t1.a = t2.a AND (t1.b<0 OR t1.b>0)
HAVING t1.c != 5 ORDER BY t1.c;
-SELECT t1.b, t1.c FROM t1, t2 WHERE t1.a = t2.a AND t1.b != 0
+SELECT t1.b, t1.c FROM t1, t2 WHERE t1.a = t2.a AND (t1.b<0 OR t1.b>0)
HAVING t1.c != 5 ORDER BY t1.c;
DROP TABLE t1,t2;
diff --git a/mysql-test/include/not_asan.inc b/mysql-test/include/not_asan.inc
new file mode 100644
index 00000000000..9fc86a8525a
--- /dev/null
+++ b/mysql-test/include/not_asan.inc
@@ -0,0 +1,8 @@
+# This file should only be used for tests that trigger problems under ASan that can not
+# be overcome. In normal cases one should fix the bug in the server or the test case, or in
+# the worst case add a (temporary?) suppression in asan.supp or lsan.supp
+
+if (`select count(*) from information_schema.system_variables where variable_name='have_sanitizer' and global_value="ASAN"`)
+{
+--skip Can't be run with ASan
+}
diff --git a/mysql-test/include/not_ubsan.inc b/mysql-test/include/not_ubsan.inc
new file mode 100644
index 00000000000..809f505507e
--- /dev/null
+++ b/mysql-test/include/not_ubsan.inc
@@ -0,0 +1,8 @@
+# This file should only be used for tests that trigger problems under UBSAN that can not
+# be overcome. In normal cases one should fix the bug in the server or the test case, or in
+# the worst case add a (temporary?) suppression.
+
+if (`select count(*) from information_schema.system_variables where variable_name='have_sanitizer' and global_value="UBSAN"`)
+{
+--skip Can't be run with UBSAN
+}
diff --git a/mysql-test/lib/My/Debugger.pm b/mysql-test/lib/My/Debugger.pm
new file mode 100644
index 00000000000..7331238e1c8
--- /dev/null
+++ b/mysql-test/lib/My/Debugger.pm
@@ -0,0 +1,266 @@
+package My::Debugger;
+
+use strict;
+use warnings;
+use Text::Wrap;
+use Cwd;
+use My::Platform;
+
+# 1. options to support:
+# --xxx[=ARGS]
+# --manual-xxx[=ARGS]
+# --client-xxx[=ARGS]
+# --boot-xxx[=ARGS]
+# TODO --manual-client-xxx[=ARGS]
+# TODO --manual-boot-xxx[=ARGS]
+# TODO --exec-xxx[=ARGS] (for $ENV{MYSQL}, etc)
+#
+# ARGS is a semicolon-separated list of commands for the
+# command file. If the first command starts from '-' it'll
+# be for a command line, not for a command file.
+#
+# 2. terminal to use: xterm
+# TODO MTR_TERM="xterm -title {title} -e {command}"
+#
+# 3. debugger combinations are *not allowed*
+# (thus no --valgrind --gdb)
+#
+# 4. variables for the command line / file templates:
+# {vardir} -> vardir
+# {exe} -> /path/to/binary/to/execute
+# {args} -> command-line arguments, "-quoted
+# {input}
+# {type} -> client, mysqld.1, etc
+# {script} -> vardir/tmp/{debugger}init.$type
+# {log} -> vardir/log/$type.{debugger}
+# {options} -> user options for the debugger.
+#
+# if {options} isn't used, they're auto-placed before {exe}
+# or at the end if no {exe}
+
+my %debuggers = (
+ gdb => {
+ term => 1,
+ options => '-x {script} {exe}',
+ script => 'set args {args} < {input}',
+ },
+ ddd => {
+ options => '--command {script} {exe}',
+ script => 'set args {args} < {input}',
+ },
+ dbx => {
+ term => 1,
+ options => '-c "stop in main; run {exe} {args} < {input}"',
+ },
+ devenv => {
+ options => '/debugexe {exe} {args}',
+ },
+ windbg => {
+ options => '{exe} {args}',
+ },
+ lldb => {
+ term => 1,
+ options => '-s {script} {exe}',
+ script => 'process launch --stop-at-entry -- {args}',
+ },
+ valgrind => {
+ options => '--tool=memcheck --show-reachable=yes --leak-check=yes --num-callers=16 --quiet --suppressions='.cwd().'/valgrind.supp {exe} {args} --loose-wait-for-pos-timeout=1500',
+ pre => sub {
+ my $debug_libraries_path= "/usr/lib/debug";
+ $ENV{LD_LIBRARY_PATH} .= ":$debug_libraries_path" if -d $debug_libraries_path;
+ }
+ },
+ strace => {
+ options => '-f -o {log} {exe} {args}',
+ },
+ rr => {
+ options => '_RR_TRACE_DIR={log} rr record {exe} {args}',
+ run => 'env',
+ pre => sub {
+ ::mtr_error('rr requires kernel.perf_event_paranoid <= 1')
+ if ::mtr_grab_file('/proc/sys/kernel/perf_event_paranoid') > 1;
+ }
+ },
+ valgdb => {
+ term => 1,
+ run => 'gdb',
+ options => '-x {script} {exe}',
+ script => <<EEE,
+py
+import subprocess,shlex,time
+valg=subprocess.Popen(shlex.split("""valgrind --tool=memcheck --show-reachable=yes --leak-check=yes --num-callers=16 --quiet --suppressions=valgrind.supp --vgdb-error=0 {exe} {args} --loose-wait-for-pos-timeout=1500"""))
+time.sleep(2)
+gdb.execute("target remote | /usr/lib64/valgrind/../../bin/vgdb --pid=" + str(valg.pid))
+EEE
+ pre => sub {
+ my $debug_libraries_path= "/usr/lib/debug";
+ $ENV{LD_LIBRARY_PATH} .= ":$debug_libraries_path" if -d $debug_libraries_path;
+ }
+ },
+
+ # aliases
+ vsjitdebugger => 'windbg',
+ ktrace => 'strace',
+);
+
+my %opts;
+my %opt_vals;
+my $help = "\n\nOptions for running debuggers\n\n";
+
+for my $k (sort keys %debuggers) {
+ my $v = $debuggers{$k};
+ $v = $debuggers{$k} = $debuggers{$v} if not ref $v; # resolve aliases
+
+ sub register_opt($$) {
+ my ($name, $msg) = @_;
+ $opts{"$name=s"} = \$opt_vals{$name};
+ $help .= wrap(sprintf(" %-23s", $name), ' 'x25, "$msg under $name\n");
+ }
+
+ $v->{script} = '' unless $v->{script};
+ $v->{options} =~ s/(\{exe\}|$)/ {options} $&/ unless $v->{options} =~ /\{options\}/;
+
+ register_opt "$k" => "Start mysqld";
+ register_opt "client-$k" => "Start mysqltest client";
+ register_opt "boot-$k" => "Start bootstrap server";
+ register_opt "manual-$k" => "Before running test(s) let user manually start mysqld";
+}
+
+sub subst($%) {
+ use warnings FATAL => 'uninitialized';
+ my ($templ, %vars) = @_;
+ $templ =~ s/\{(\w+)\}/$vars{$1}/g;
+ $templ;
+}
+
+sub do_args($$$$$) {
+ my ($args, $exe, $input, $type, $opt) = @_;
+ my $k = $opt =~ /^(?:client|boot|manual)-(.*)$/ ? $1 : $opt;
+ my $v = $debuggers{$k};
+
+ # on windows mtr args are quoted (for system), otherwise not (for exec)
+ sub quote($) { $_[0] =~ / / ? "\"$_[0]\"" : $_[0] }
+ sub unquote($) { $_[0] =~ s/^"(.*)"$/$1/; $_[0] }
+ sub quote_from_mtr($) { IS_WINDOWS() ? $_[0] : quote($_[0]) }
+ sub unquote_for_mtr($) { IS_WINDOWS() ? $_[0] : unquote($_[0]) }
+
+ my %vars = (
+ vardir => $::opt_vardir,
+ exe => $$exe,
+ args => join(' ', map { quote_from_mtr $_ } @$$args, '--gdb'),
+ input => $input,
+ script => "$::opt_vardir/tmp/${k}init.$type",
+ log => "$::opt_vardir/log/$type.$k",
+ options => '',
+ );
+ my @params = split /;/, $opt_vals{$opt};
+ $vars{options} = shift @params if @params and $params[0] =~ /^-/;
+
+ my $script = join "\n", @params;
+ if ($v->{script}) {
+ ::mtr_tofile($vars{script}, subst($v->{script}, %vars)."\n".$script);
+ } elsif ($script) {
+ die "$k is not using a script file, nowhere to write the script \n---\n$script\n---\n";
+ }
+
+ my $options = subst($v->{options}, %vars);
+ @$$args = map { unquote_for_mtr $_ } $options =~ /("[^"]+"|\S+)/g;
+ my $run = $v->{run} || $k;
+
+ if ($opt =~ /^manual-/) {
+ print "\nTo start $k for $type, type in another window:\n";
+ print "$run $options\n";
+ $$exe= undef; # Indicate the exe should not be started
+ } elsif ($v->{term}) {
+ unshift @$$args, '-title', $type, '-e', $run;
+ $$exe = 'xterm';
+ } else {
+ $$exe = $run;
+ }
+}
+
+sub options() { %opts }
+sub help() { $help }
+
+sub fix_options(@) {
+ my $re=join '|', keys %opts;
+ $re =~ s/=s//g;
+ map { $_ . (/^--($re)$/ and '=;') } @_;
+}
+
+sub pre_setup() {
+ my $used;
+ for my $k (keys %debuggers) {
+ for my $opt ($k, "manual-$k", "boot-$k", "client-$k") {
+ if ($opt_vals{$opt})
+ {
+ $used = 1;
+ if ($debuggers{$k}->{pre}) {
+ $debuggers{$k}->{pre}->();
+ delete $debuggers{$k}->{pre};
+ }
+ }
+ }
+ }
+
+ if ($used) {
+ $ENV{ASAN_OPTIONS}= 'abort_on_error=1:'.($ENV{ASAN_OPTIONS} || '');
+ ::mtr_error("Can't use --extern when using debugger") if $ENV{USE_RUNNING_SERVER};
+
+ $::opt_retry= 1;
+ $::opt_retry_failure= 1;
+ $::opt_testcase_timeout= 7 * 24 * 60; # in minutes
+ $::opt_suite_timeout= 7 * 24 * 60; # in minutes
+ $::opt_shutdown_timeout= 24 * 60 *60; # in seconds
+ $::opt_start_timeout= 24 * 60 * 60; # in seconds
+ }
+}
+
+sub setup_boot_args($$$) {
+ my ($args, $exe, $input) = @_;
+ my $found;
+
+ for my $k (keys %debuggers) {
+ if ($opt_vals{"boot-$k"}) {
+ die "--boot-$k and --$found cannot be used at the same time\n" if $found;
+
+ $found="boot-$k";
+ do_args($args, $exe, $input, 'bootstrap', $found);
+ }
+ }
+}
+
+sub setup_client_args($$) {
+ my ($args, $exe) = @_;
+ my $found;
+ my $embedded = $::opt_embedded_server ? ' with --embedded' : '';
+
+ for my $k (keys %debuggers) {
+ my @opt_names=("client-$k");
+ push @opt_names, $k if $embedded;
+ for my $opt (@opt_names) {
+ if ($opt_vals{$opt}) {
+ die "--$opt and --$found cannot be used at the same time$embedded\n" if $found;
+ $found=$opt;
+ do_args($args, $exe, IS_WINDOWS() ? 'NUL' : '/dev/null', 'client', $found);
+ }
+ }
+ }
+}
+
+sub setup_args($$$) {
+ my ($args, $exe, $type) = @_;
+ my $found;
+
+ for my $k (keys %debuggers) {
+ for my $opt ($k, "manual-$k") {
+ if ($opt_vals{$opt}) {
+ die "--$opt and --$found cannot be used at the same time\n" if $found;
+ $found=$opt;
+ do_args($args, $exe, IS_WINDOWS() ? 'NUL' : '/dev/null', $type, $found);
+ }
+ }
+ }
+}
+
+1;
diff --git a/mysql-test/lib/mtr_report.pm b/mysql-test/lib/mtr_report.pm
index 473b21441e2..8144a6ef02e 100644
--- a/mysql-test/lib/mtr_report.pm
+++ b/mysql-test/lib/mtr_report.pm
@@ -497,23 +497,21 @@ sub mtr_report_stats ($$$$) {
$test_time = sprintf("%.3f", $test->{timer} / 1000);
$test->{'name'} =~ s/$current_suite\.//;
- my $test_result;
-
- # if a test case has to be retried it should have the result MTR_RES_FAILED in jUnit XML
- if ($test->{'retries'} > 0) {
- $test_result = "MTR_RES_FAILED";
+ my $combinations;
+ if (defined($test->{combinations})){
+ $combinations = join ',', sort @{$test->{combinations}};
} else {
- $test_result = $test->{'result'};
+ $combinations = "";
}
- $xml_report .= qq(\t\t<testcase assertions="" classname="$current_suite" name="$test->{'name'}" status="$test_result" time="$test_time");
+ $xml_report .= qq(\t\t<testcase assertions="" classname="$current_suite" name="$test->{'name'}" ).
+ qq(status="$test->{'result'}" time="$test_time" combinations="$combinations");
- my $comment = $test->{'comment'};
- $comment =~ s/[\"]//g;
+ my $comment= replace_special_symbols($test->{'comment'});
- # if a test case has to be retried it should have the result MTR_RES_FAILED in jUnit XML
- if ($test->{'result'} eq "MTR_RES_FAILED" || $test->{'retries'} > 0) {
+ if ($test->{'result'} eq "MTR_RES_FAILED") {
my $logcontents = $test->{'logfile-failed'} || $test->{'logfile'};
+ $logcontents= $logcontents.$test->{'warnings'}."\n";
# remove any double ] that would end the cdata
$logcontents =~ s/]]/\x{fffd}/g;
# replace wide characters that aren't allowed in XML 1.0
@@ -576,6 +574,16 @@ sub mtr_print_line () {
print '-' x 74 . "\n";
}
+sub replace_special_symbols($) {
+ my $text= shift;
+ $text =~ s/&/&#38;/g;
+ $text =~ s/'/&#39;/g;
+ $text =~ s/"/&#34;/g;
+ $text =~ s/</&lt;/g;
+ $text =~ s/>/&gt;/g;
+ return $text;
+}
+
sub mtr_print_thick_line {
my $char= shift || '=';
diff --git a/mysql-test/main/alter_table.result b/mysql-test/main/alter_table.result
index 908435710a3..3c59b83f2ce 100644
--- a/mysql-test/main/alter_table.result
+++ b/mysql-test/main/alter_table.result
@@ -2529,6 +2529,23 @@ ALTER TABLE t1 ALTER COLUMN k1 SET DEFAULT (SELECT 1 FROM t2 limit 1);
ERROR HY000: Function or expression 'select ...' cannot be used in the DEFAULT clause of `k1`
DROP TABLE t1,t2;
#
+# MDEV-25403 ALTER TABLE wrongly checks for field's default value if AFTER is used
+#
+create table t1(t int, d date not null);
+insert into t1 values (1,'2001-1-1');
+set sql_mode = "no_zero_date";
+alter table t1 change d d date not null after t, add i int;
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `t` int(11) DEFAULT NULL,
+ `d` date NOT NULL,
+ `i` int(11) DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+alter table t1 add x date not null;
+ERROR 22007: Incorrect date value: '0000-00-00' for column `test`.`t1`.`x` at row 1
+drop table t1;
+#
# End of 10.2 tests
#
#
diff --git a/mysql-test/main/alter_table.test b/mysql-test/main/alter_table.test
index ce461b2fe46..fa931fcadbf 100644
--- a/mysql-test/main/alter_table.test
+++ b/mysql-test/main/alter_table.test
@@ -2051,6 +2051,18 @@ ALTER TABLE t1 ALTER COLUMN k1 SET DEFAULT (SELECT 1 FROM t2 limit 1);
DROP TABLE t1,t2;
--echo #
+--echo # MDEV-25403 ALTER TABLE wrongly checks for field's default value if AFTER is used
+--echo #
+create table t1(t int, d date not null);
+insert into t1 values (1,'2001-1-1');
+set sql_mode = "no_zero_date";
+alter table t1 change d d date not null after t, add i int;
+show create table t1;
+--error ER_TRUNCATED_WRONG_VALUE
+alter table t1 add x date not null;
+drop table t1;
+
+--echo #
--echo # End of 10.2 tests
--echo #
diff --git a/mysql-test/main/backup_lock_binlog.result b/mysql-test/main/backup_lock_binlog.result
new file mode 100644
index 00000000000..adf960a9cb1
--- /dev/null
+++ b/mysql-test/main/backup_lock_binlog.result
@@ -0,0 +1,40 @@
+#
+# MDEV-25334 FTWRL/Backup blocks DDL on temporary tables with binlog
+# enabled assertion fails in Diagnostics_area::set_error_status
+#
+select @@binlog_format;
+@@binlog_format
+MIXED
+connect con1,localhost,root,,;
+connection default;
+#
+# Test 1
+#
+CREATE TEMPORARY TABLE tmp (a INT);
+connection con1;
+FLUSH TABLES WITH READ LOCK;
+connection default;
+SET lock_wait_timeout= 1;
+ALTER TABLE tmp;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+connection con1;
+unlock tables;
+connection default;
+drop table tmp;
+#
+# Test 2 (In statement format to ensure temporary table gets logged)
+#
+set @@binlog_format=statement;
+CREATE TEMPORARY TABLE tmp (a INT);
+connection con1;
+BACKUP STAGE START;
+BACKUP STAGE BLOCK_COMMIT;
+connection default;
+SET lock_wait_timeout= 1;
+ALTER TABLE tmp;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+connection con1;
+BACKUP STAGE end;
+connection default;
+drop table tmp;
+disconnect con1;
diff --git a/mysql-test/main/backup_lock_binlog.test b/mysql-test/main/backup_lock_binlog.test
new file mode 100644
index 00000000000..45b3f1cfbd9
--- /dev/null
+++ b/mysql-test/main/backup_lock_binlog.test
@@ -0,0 +1,49 @@
+--source include/have_binlog_format_mixed_or_statement.inc
+
+#
+# Tests involving locks and binlog
+#
+
+--echo #
+--echo # MDEV-25334 FTWRL/Backup blocks DDL on temporary tables with binlog
+--echo # enabled assertion fails in Diagnostics_area::set_error_status
+--echo #
+
+select @@binlog_format;
+--connect (con1,localhost,root,,)
+connection default;
+
+--echo #
+--echo # Test 1
+--echo #
+
+CREATE TEMPORARY TABLE tmp (a INT);
+--connection con1
+FLUSH TABLES WITH READ LOCK;
+--connection default
+SET lock_wait_timeout= 1;
+--error ER_LOCK_WAIT_TIMEOUT
+ALTER TABLE tmp;
+--connection con1
+unlock tables;
+--connection default
+drop table tmp;
+
+--echo #
+--echo # Test 2 (In statement format to ensure temporary table gets logged)
+--echo #
+
+set @@binlog_format=statement;
+CREATE TEMPORARY TABLE tmp (a INT);
+--connection con1
+BACKUP STAGE START;
+BACKUP STAGE BLOCK_COMMIT;
+--connection default
+SET lock_wait_timeout= 1;
+--error ER_LOCK_WAIT_TIMEOUT
+ALTER TABLE tmp;
+--connection con1
+BACKUP STAGE end;
+--connection default
+drop table tmp;
+--disconnect con1
diff --git a/mysql-test/main/brackets.result b/mysql-test/main/brackets.result
index dedd9a2a2bf..c221d64fd17 100644
--- a/mysql-test/main/brackets.result
+++ b/mysql-test/main/brackets.result
@@ -4397,7 +4397,7 @@ with t as (select * from t1 where a <=3)
select * from t;
show create view v1;
View Create View character_set_client collation_connection
-v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS with t as (select `t1`.`a` AS `a` from `t1` where `t1`.`a` <= 3)select `t`.`a` AS `a` from `t` latin1 latin1_swedish_ci
+v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS with t as (select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` <= 3)select `t`.`a` AS `a` from `t` latin1 latin1_swedish_ci
select * from v1;
a
3
@@ -4413,7 +4413,7 @@ order by a desc limit 3 )
select a from t1 where a=4 union select a from t where a=7;
show create view v1;
View Create View character_set_client collation_connection
-v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS with t as (select `t1`.`a` AS `a` from `t1` where `t1`.`a` < 3 union select `t1`.`a` AS `a` from `t1` where `t1`.`a` > 3 order by `a` desc limit 3)select `t1`.`a` AS `a` from `t1` where `t1`.`a` = 4 union select `t`.`a` AS `a` from `t` where `t`.`a` = 7 latin1 latin1_swedish_ci
+v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS with t as (select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` < 3 union select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` > 3 order by `a` desc limit 3)select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` = 4 union select `t`.`a` AS `a` from `t` where `t`.`a` = 7 latin1 latin1_swedish_ci
select * from v1;
a
4
@@ -4429,7 +4429,7 @@ order by a desc limit 3 )
(select a from t1 where a=4 union select a from t where a=7 order by a desc);
show create view v1;
View Create View character_set_client collation_connection
-v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS with t as ((select `t1`.`a` AS `a` from `t1` where `t1`.`a` < 3) union (select `t1`.`a` AS `a` from `t1` where `t1`.`a` > 3) order by `a` desc limit 3)select `t1`.`a` AS `a` from `t1` where `t1`.`a` = 4 union select `t`.`a` AS `a` from `t` where `t`.`a` = 7 order by `a` desc latin1 latin1_swedish_ci
+v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS with t as ((select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` < 3) union (select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` > 3) order by `a` desc limit 3)select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` = 4 union select `t`.`a` AS `a` from `t` where `t`.`a` = 7 order by `a` desc latin1 latin1_swedish_ci
select * from v1;
a
7
@@ -4444,7 +4444,7 @@ order by a desc limit 3 )
(select a from t where a=4 union select a from t where a=7 order by a desc);
show create view v1;
View Create View character_set_client collation_connection
-v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS with t as ((select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` < 3) union (select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` > 3) order by `a` desc limit 3)select `t`.`a` AS `a` from `t` where `t`.`a` = 4 union select `t`.`a` AS `a` from ((select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` < 3) union (select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` > 3) order by `a` desc limit 3) `t` where `t`.`a` = 7 order by `a` desc latin1 latin1_swedish_ci
+v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS with t as ((select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` < 3) union (select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` > 3) order by `a` desc limit 3)select `t`.`a` AS `a` from `t` where `t`.`a` = 4 union select `t`.`a` AS `a` from `t` where `t`.`a` = 7 order by `a` desc latin1 latin1_swedish_ci
select * from v1;
a
7
@@ -4469,7 +4469,7 @@ order by 1 desc limit 3 )
select a from t1 where a=4 union select a from t where a=7 order by a desc;
show create view v1;
View Create View character_set_client collation_connection
-v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS with t(a) as (values (2),(1) union (values (4),(7)) order by 1 desc limit 3)select `t1`.`a` AS `a` from `t1` where `t1`.`a` = 4 union select `t`.`a` AS `a` from `t` where `t`.`a` = 7 order by `a` desc latin1 latin1_swedish_ci
+v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS with t(a) as (values (2),(1) union (values (4),(7)) order by 1 desc limit 3)select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` = 4 union select `t`.`a` AS `a` from `t` where `t`.`a` = 7 order by `a` desc latin1 latin1_swedish_ci
select * from v1;
a
7
@@ -4484,7 +4484,7 @@ order by 1 desc limit 3 )
select a from t1 where a=1 union select a from t where a=7 order by a desc;
show create view v1;
View Create View character_set_client collation_connection
-v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS with t(a) as ((values (2),(1)) union (values (4),(7) order by 1 desc) order by 1 desc limit 3)select `t1`.`a` AS `a` from `t1` where `t1`.`a` = 1 union select `t`.`a` AS `a` from `t` where `t`.`a` = 7 order by `a` desc latin1 latin1_swedish_ci
+v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS with t(a) as ((values (2),(1)) union (values (4),(7) order by 1 desc) order by 1 desc limit 3)select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` = 1 union select `t`.`a` AS `a` from `t` where `t`.`a` = 7 order by `a` desc latin1 latin1_swedish_ci
select * from v1;
a
7
@@ -4496,7 +4496,7 @@ s as (select * from t1 where a > 3)
select a from t where a=1 union select a from s where a=7 order by a desc;
show create view v1;
View Create View character_set_client collation_connection
-v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS with t as (select `t1`.`a` AS `a` from `t1` where `t1`.`a` < 3), s as (select `t1`.`a` AS `a` from `t1` where `t1`.`a` > 3)select `t`.`a` AS `a` from `t` where `t`.`a` = 1 union select `s`.`a` AS `a` from `s` where `s`.`a` = 7 order by `a` desc latin1 latin1_swedish_ci
+v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS with t as (select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` < 3), s as (select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` > 3)select `t`.`a` AS `a` from `t` where `t`.`a` = 1 union select `s`.`a` AS `a` from `s` where `s`.`a` = 7 order by `a` desc latin1 latin1_swedish_ci
select * from v1;
a
7
@@ -4508,7 +4508,7 @@ s as (select * from t where a > 3)
select a from t where a=1 union select a from s where a=7 order by a desc;
show create view v1;
View Create View character_set_client collation_connection
-v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS with t as (select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` < 3), s as (select `t`.`a` AS `a` from (select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` < 3) `t` where `t`.`a` > 3)select `t`.`a` AS `a` from `t` where `t`.`a` = 1 union select `s`.`a` AS `a` from `s` where `s`.`a` = 7 order by `a` desc latin1 latin1_swedish_ci
+v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS with t as (select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` < 3), s as (select `t`.`a` AS `a` from `t` where `t`.`a` > 3)select `t`.`a` AS `a` from `t` where `t`.`a` = 1 union select `s`.`a` AS `a` from `s` where `s`.`a` = 7 order by `a` desc latin1 latin1_swedish_ci
select * from v1;
a
1
diff --git a/mysql-test/main/check_constraint.result b/mysql-test/main/check_constraint.result
index 3511af84166..f851b99e5c1 100644
--- a/mysql-test/main/check_constraint.result
+++ b/mysql-test/main/check_constraint.result
@@ -235,3 +235,15 @@ a b
insert t1 (b) values (1);
ERROR 23000: CONSTRAINT `CONSTRAINT_1` failed for `test`.`t1`
drop table t1;
+#
+# MDEV-24274 ALTER TABLE with CHECK CONSTRAINTS gives "Out of Memory" error
+#
+create table t1 (id varchar(2), constraint id check (id regexp '[a-z]'));
+alter table t1 force;
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` varchar(2) DEFAULT NULL,
+ CONSTRAINT `id` CHECK (`id` regexp '[a-z]')
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+drop table t1;
diff --git a/mysql-test/main/check_constraint.test b/mysql-test/main/check_constraint.test
index 93538fd1666..1258a9e3be6 100644
--- a/mysql-test/main/check_constraint.test
+++ b/mysql-test/main/check_constraint.test
@@ -176,3 +176,11 @@ select * from t1 where a is null;
--error ER_CONSTRAINT_FAILED
insert t1 (b) values (1);
drop table t1;
+
+--echo #
+--echo # MDEV-24274 ALTER TABLE with CHECK CONSTRAINTS gives "Out of Memory" error
+--echo #
+create table t1 (id varchar(2), constraint id check (id regexp '[a-z]'));
+alter table t1 force;
+show create table t1;
+drop table t1;
diff --git a/mysql-test/main/contributors.result b/mysql-test/main/contributors.result
index 3e4bf5f0d43..0c7ca03a2c5 100644
--- a/mysql-test/main/contributors.result
+++ b/mysql-test/main/contributors.result
@@ -1,21 +1,16 @@
SHOW CONTRIBUTORS;
Name Location Comment
-Booking.com https://www.booking.com Founding member, Platinum Sponsor of the MariaDB Foundation
Alibaba Cloud https://www.alibabacloud.com/ Platinum Sponsor of the MariaDB Foundation
Tencent Cloud https://cloud.tencent.com Platinum Sponsor of the MariaDB Foundation
Microsoft https://microsoft.com/ Platinum Sponsor of the MariaDB Foundation
MariaDB Corporation https://mariadb.com Founding member, Platinum Sponsor of the MariaDB Foundation
+ServiceNow https://servicenow.com Platinum Sponsor of the MariaDB Foundation
Visma https://visma.com Gold Sponsor of the MariaDB Foundation
DBS https://dbs.com Gold Sponsor of the MariaDB Foundation
IBM https://www.ibm.com Gold Sponsor of the MariaDB Foundation
-Tencent Games http://game.qq.com/ Gold Sponsor of the MariaDB Foundation
-Nexedi https://www.nexedi.com Silver Sponsor of the MariaDB Foundation
-Acronis https://www.acronis.com Silver Sponsor of the MariaDB Foundation
-Verkkokauppa.com https://www.verkkokauppa.com Bronze Sponsor of the MariaDB Foundation
-Virtuozzo https://virtuozzo.com Bronze Sponsor of the MariaDB Foundation
-Tencent Game DBA http://tencentdba.com/about Bronze Sponsor of the MariaDB Foundation
-Tencent TDSQL http://tdsql.org Bronze Sponsor of the MariaDB Foundation
-Percona https://www.percona.com/ Bronze Sponsor of the MariaDB Foundation
+Automattic https://automattic.com Silver Sponsor of the MariaDB Foundation
+Percona https://www.percona.com/ Sponsor of the MariaDB Foundation
+Galera Cluster https://galeracluster.com Sponsor of the MariaDB Foundation
Google USA Sponsoring encryption, parallel replication and GTID
Facebook USA Sponsoring non-blocking API, LIMIT ROWS EXAMINED etc
Ronald Bradford Brisbane, Australia EFF contribution for UC2006 Auction
diff --git a/mysql-test/main/create.result b/mysql-test/main/create.result
index 2905103b707..39ede1f90b9 100644
--- a/mysql-test/main/create.result
+++ b/mysql-test/main/create.result
@@ -1,7 +1,4 @@
call mtr.add_suppression("table or database name 't-1'");
-drop table if exists t1,t2,t3,t4,t5;
-drop database if exists mysqltest;
-drop view if exists v1;
create table t1 (b char(0));
insert into t1 values (""),(null);
select * from t1;
@@ -1225,7 +1222,7 @@ drop table if exists t1,t2,t3;
# Fix modified for MariaDB: we support this syntax
create table t1 (a int) transactional=0;
Warnings:
-Warning 1478 Table storage engine 'MyISAM' does not support the create option 'TRANSACTIONAL=1'
+Warning 1478 Table storage engine 'MyISAM' does not support the create option 'TRANSACTIONAL=0'
create table t2 (a int) page_checksum=1;
create table t3 (a int) row_format=page;
drop table t1,t2,t3;
@@ -2001,19 +1998,42 @@ alter table t1 add
key xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx0064 (f64) comment 'yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy';
ERROR HY000: Cannot create table `t1`: index information is too long. Decrease number of indexes or use shorter index names or shorter comments.
drop table t1;
-End of 5.5 tests
+#
+# End of 5.5 tests
+#
+#
+# MDEV-4880 Attempt to create a table without columns produces ER_ILLEGAL_HA instead of ER_TABLE_MUST_HAVE_COLUMNS
+#
create table t1;
ERROR 42000: A table must have at least 1 column
+#
+# MDEV-11231 Server crashes in check_duplicate_key on CREATE TABLE ... SELECT
+#
create table t1 (i int, j int, key(i), key(i)) as select 1 as i, 2 as j;
Warnings:
Note 1831 Duplicate index `i_2`. This is deprecated and will be disallowed in a future release
drop table t1;
+#
+# End of 10.0 tests
+#
+#
+# MDEV-18428 Memory: If transactional=0 is specified in CREATE TABLE, it is not possible to ALTER TABLE
+#
+create table t1 (c int(10) unsigned) engine=memory transactional=0;
+ERROR HY000: Table storage engine 'MEMORY' does not support the create option 'TRANSACTIONAL=0'
+#
+# End of 10.2 tests
+#
+#
+# MDEV-17544 No warning when trying to name a primary key constraint.
+#
CREATE TABLE t1 ( id1 INT, id2 INT, CONSTRAINT `foo` PRIMARY KEY (id1), CONSTRAINT `bar` UNIQUE KEY(id2));
Warnings:
Warning 1280 Name 'foo' ignored for PRIMARY key.
DROP TABLE t1;
#
-# 10.4 Test
+# End of 10.3 tests
+#
#
# MDEV-21017: Assertion `!is_set() || (m_status == DA_OK_BULK &&
# is_bulk_op())' failed or late ER_PERIOD_FIELD_WRONG_ATTRIBUTES
@@ -2026,4 +2046,6 @@ e DATE, PERIOD FOR app(s,e));
ERROR HY000: Period field `s` cannot be GENERATED ALWAYS AS
UNLOCK TABLES;
DROP TABLE t1;
-# End of 10.4 Test
+#
+# End of 10.4 tests
+#
diff --git a/mysql-test/main/create.test b/mysql-test/main/create.test
index a6a6ba0c782..adac81f02c5 100644
--- a/mysql-test/main/create.test
+++ b/mysql-test/main/create.test
@@ -5,12 +5,6 @@ call mtr.add_suppression("table or database name 't-1'");
# Check some special create statements.
#
---disable_warnings
-drop table if exists t1,t2,t3,t4,t5;
-drop database if exists mysqltest;
-drop view if exists v1;
---enable_warnings
-
create table t1 (b char(0));
insert into t1 values (""),(null);
select * from t1;
@@ -29,30 +23,30 @@ drop table t1;
# Test of some CREATE TABLE'S that should fail
#
---error 1146
+--error ER_NO_SUCH_TABLE
create table t2 engine=heap select * from t1;
---error 1146
+--error ER_NO_SUCH_TABLE
create table t2 select auto+1 from t1;
drop table if exists t1,t2;
---error 1167
+--error ER_WRONG_KEY_COLUMN
create table t1 (b char(0) not null, index(b));
---error 1163
+--error ER_TABLE_CANT_HANDLE_BLOB
create table t1 (a int not null,b text) engine=heap;
drop table if exists t1;
---error 1075
+--error ER_WRONG_AUTO_KEY
create table t1 (ordid int(8) not null auto_increment, ord varchar(50) not null, primary key (ord,ordid)) engine=heap;
--- error 1049
+--error ER_BAD_DB_ERROR
create table not_existing_database.test (a int);
create table `a/a` (a int);
show create table `a/a`;
create table t1 like `a/a`;
drop table `a/a`;
drop table `t1`;
---error 1103
+--error ER_WRONG_TABLE_NAME
create table `aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa` (aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa int);
---error 1059
+--error ER_TOO_LONG_IDENT
create table a (`aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa` int);
#
@@ -62,17 +56,17 @@ create table t1 (a datetime default now());
drop table t1;
create table t1 (a datetime on update now());
drop table t1;
---error 1067
+--error ER_INVALID_DEFAULT
create table t1 (a int default 100 auto_increment);
---error 1067
+--error ER_INVALID_DEFAULT
create table t1 (a tinyint default 1000);
---error 1067
+--error ER_INVALID_DEFAULT
create table t1 (a varchar(5) default 'abcdef');
create table t1 (a varchar(5) default 'abcde');
insert into t1 values();
select * from t1;
---error 1067
+--error ER_INVALID_DEFAULT
SET STATEMENT sql_mode = 'NO_ENGINE_SUBSTITUTION' FOR
alter table t1 alter column a set default 'abcdef';
drop table t1;
@@ -97,13 +91,13 @@ create table mysqltest.test2$ (a int);
drop table mysqltest.test2$;
drop database mysqltest;
---error 1103
+--error ER_WRONG_TABLE_NAME
create table `` (a int);
---error 1103
+--error ER_WRONG_TABLE_NAME
drop table if exists ``;
---error 1166
+--error ER_WRONG_COLUMN_NAME
create table t1 (`` int);
---error 1280
+--error ER_WRONG_NAME_FOR_INDEX
create table t1 (i int, index `` (i));
#
@@ -158,13 +152,13 @@ create table t2 (a int) select * from t1;
describe t1;
describe t2;
drop table if exists t2;
---error 1060
+--error ER_DUP_FIELDNAME
create table t2 (a int, a float) select * from t1;
drop table if exists t2;
---error 1060
+--error ER_DUP_FIELDNAME
create table t2 (a int) select a as b, a+1 as b from t1;
drop table if exists t2;
---error 1060
+--error ER_DUP_FIELDNAME
create table t2 (b int) select a as b, a+1 as b from t1;
drop table if exists t1,t2;
@@ -176,7 +170,7 @@ CREATE TABLE t1 (a int not null);
INSERT INTO t1 values (1),(2),(1);
--error ER_DUP_ENTRY
CREATE TABLE t2 (primary key(a)) SELECT * FROM t1;
---error 1146
+--error ER_NO_SUCH_TABLE
SELECT * from t2;
DROP TABLE t1;
DROP TABLE IF EXISTS t2;
@@ -202,7 +196,7 @@ SELECT @@storage_engine;
CREATE TABLE t1 (a int not null);
show create table t1;
drop table t1;
---error 1286
+--error ER_UNKNOWN_STORAGE_ENGINE
SET SESSION storage_engine="gemini";
SELECT @@storage_engine;
CREATE TABLE t1 (a int not null);
@@ -216,11 +210,11 @@ drop table t1;
#
create table t1 ( k1 varchar(2), k2 int, primary key(k1,k2));
insert into t1 values ("a", 1), ("b", 2);
---error 1048
+--error ER_BAD_NULL_ERROR
insert into t1 values ("c", NULL);
---error 1048
+--error ER_BAD_NULL_ERROR
insert into t1 values (NULL, 3);
---error 1048
+--error ER_BAD_NULL_ERROR
insert into t1 values (NULL, NULL);
drop table t1;
@@ -262,11 +256,11 @@ drop table t1;
# "Table truncated when creating another table name with Spaces"
#
---error 1103
+--error ER_WRONG_TABLE_NAME
create table `t1 `(a int);
---error 1102
+--error ER_WRONG_DB_NAME
create database `db1 `;
---error 1166
+--error ER_WRONG_COLUMN_NAME
create table t1(`a ` int);
#
@@ -274,11 +268,11 @@ create table t1(`a ` int);
# "Parser permits multiple commas without syntax error"
#
---error 1064
+--error ER_PARSE_ERROR
create table t1 (a int,);
---error 1064
+--error ER_PARSE_ERROR
create table t1 (a int,,b int);
---error 1064
+--error ER_PARSE_ERROR
create table t1 (,b int);
#
@@ -320,13 +314,13 @@ create table t2 like t3;
show create table t2;
select * from t2;
create table t3 like t1;
---error 1050
+--error ER_TABLE_EXISTS_ERROR
create table t3 like mysqltest.t3;
---error 1049
+--error ER_BAD_DB_ERROR
create table non_existing_database.t1 like t1;
--error ER_NO_SUCH_TABLE
create table t4 like non_existing_table;
---error 1050
+--error ER_TABLE_EXISTS_ERROR
create temporary table t3 like t1;
drop table t1, t2, t3;
drop table t3;
@@ -360,7 +354,7 @@ SELECT @@storage_engine;
CREATE TABLE t1 (a int not null);
show create table t1;
drop table t1;
---error 1286
+--error ER_UNKNOWN_STORAGE_ENGINE
SET SESSION storage_engine="gemini";
SELECT @@storage_engine;
CREATE TABLE t1 (a int not null);
@@ -480,9 +474,9 @@ use test;
# Test for Bug 856 'Naming a key "Primary" causes trouble'
#
---error 1280
+--error ER_WRONG_NAME_FOR_INDEX
create table t1 (a int, index `primary` (a));
---error 1280
+--error ER_WRONG_NAME_FOR_INDEX
create table t1 (a int, index `PRIMARY` (a));
create table t1 (`primary` int, index(`primary`));
@@ -491,9 +485,9 @@ create table t2 (`PRIMARY` int, index(`PRIMARY`));
show create table t2;
create table t3 (a int);
---error 1280
+--error ER_WRONG_NAME_FOR_INDEX
alter table t3 add index `primary` (a);
---error 1280
+--error ER_WRONG_NAME_FOR_INDEX
alter table t3 add index `PRIMARY` (a);
create table t4 (`primary` int);
@@ -548,11 +542,11 @@ drop table t1;
#
# Bug#10413: Invalid column name is not rejected
#
---error 1103
+--error ER_WRONG_TABLE_NAME
create table t1(column.name int);
---error 1103
+--error ER_WRONG_TABLE_NAME
create table t1(test.column.name int);
---error 1102
+--error ER_WRONG_DB_NAME
create table t1(xyz.t1.name int);
create table t1(t1.name int);
create table t2(test.t2.name int);
@@ -591,7 +585,7 @@ drop table if exists test.t1;
create database mysqltest;
use mysqltest;
create view v1 as select 'foo' from dual;
---error 1347
+--error ER_WRONG_OBJECT
create table t1 like v1;
drop view v1;
drop database mysqltest;
@@ -712,7 +706,7 @@ drop table t1, t2;
#
# Bug #15316 SET value having comma not correctly handled
#
---error 1367
+--error ER_ILLEGAL_VALUE_FOR_TYPE
create table t1(a set("a,b","c,d") not null);
# End of 4.1 tests
@@ -914,9 +908,9 @@ INSERT IGNORE INTO t1 (b) VALUES (5);
CREATE TABLE IF NOT EXISTS t2 (a INTEGER NOT NULL AUTO_INCREMENT PRIMARY KEY)
SELECT a FROM t1;
---error 1062
+--error ER_DUP_ENTRY
INSERT INTO t2 SELECT a FROM t1;
---error 1062
+--error ER_DUP_ENTRY
INSERT INTO t2 SELECT a FROM t1;
DROP TABLE t1, t2;
@@ -976,24 +970,24 @@ drop table t1,t2;
# Test incorrect database names
#
---error 1102
+--error ER_WRONG_DB_NAME
CREATE DATABASE aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa;
---error 1102
+--error ER_WRONG_DB_NAME
DROP DATABASE aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa;
# TODO: enable these tests when RENAME DATABASE is implemented.
-# --error 1049
+# --error ER_BAD_DB_ERROR
# RENAME DATABASE aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa TO a;
-# --error 1102
+# --error ER_WRONG_DB_NAME
# RENAME DATABASE mysqltest TO aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa;
# create database mysqltest;
-# --error 1102
+# --error ER_WRONG_DB_NAME
# RENAME DATABASE mysqltest TO aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa;
# drop database mysqltest;
---error 1102
+--error ER_WRONG_DB_NAME
USE aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa;
---error 1102
+--error ER_WRONG_DB_NAME
SHOW CREATE DATABASE aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa;
#
@@ -1853,28 +1847,46 @@ alter table t1 add
key xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx0064 (f64) comment 'yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy';
drop table t1;
---echo End of 5.5 tests
+--echo #
+--echo # End of 5.5 tests
+--echo #
-#
-# MDEV-4880 Attempt to create a table without columns produces ER_ILLEGAL_HA instead of ER_TABLE_MUST_HAVE_COLUMNS
-#
+--echo #
+--echo # MDEV-4880 Attempt to create a table without columns produces ER_ILLEGAL_HA instead of ER_TABLE_MUST_HAVE_COLUMNS
+--echo #
--error ER_TABLE_MUST_HAVE_COLUMNS
create table t1;
-#
-# MDEV-11231 Server crashes in check_duplicate_key on CREATE TABLE ... SELECT
-#
+--echo #
+--echo # MDEV-11231 Server crashes in check_duplicate_key on CREATE TABLE ... SELECT
+--echo #
create table t1 (i int, j int, key(i), key(i)) as select 1 as i, 2 as j;
drop table t1;
-#
-# MDEV-17544 No warning when trying to name a primary key constraint.
-#
+--echo #
+--echo # End of 10.0 tests
+--echo #
+
+--echo #
+--echo # MDEV-18428 Memory: If transactional=0 is specified in CREATE TABLE, it is not possible to ALTER TABLE
+--echo #
+--error ER_ILLEGAL_HA_CREATE_OPTION
+create table t1 (c int(10) unsigned) engine=memory transactional=0;
+
+--echo #
+--echo # End of 10.2 tests
+--echo #
+
+--echo #
+--echo # MDEV-17544 No warning when trying to name a primary key constraint.
+--echo #
CREATE TABLE t1 ( id1 INT, id2 INT, CONSTRAINT `foo` PRIMARY KEY (id1), CONSTRAINT `bar` UNIQUE KEY(id2));
DROP TABLE t1;
--echo #
---echo # 10.4 Test
+--echo # End of 10.3 tests
+--echo #
+
--echo #
--echo # MDEV-21017: Assertion `!is_set() || (m_status == DA_OK_BULK &&
--echo # is_bulk_op())' failed or late ER_PERIOD_FIELD_WRONG_ATTRIBUTES
@@ -1890,4 +1902,6 @@ e DATE, PERIOD FOR app(s,e));
UNLOCK TABLES;
DROP TABLE t1;
---echo # End of 10.4 Test
+--echo #
+--echo # End of 10.4 tests
+--echo #
diff --git a/mysql-test/main/cte_nonrecursive.result b/mysql-test/main/cte_nonrecursive.result
index d23618344bd..f50ac50ded9 100644
--- a/mysql-test/main/cte_nonrecursive.result
+++ b/mysql-test/main/cte_nonrecursive.result
@@ -571,7 +571,7 @@ with t as (select a from t1 where b >= 'c')
select * from t2,t where t2.c=t.a;
show create view v1;
View Create View character_set_client collation_connection
-v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS with t as (select `t1`.`a` AS `a` from `t1` where `t1`.`b` >= 'c')select `t2`.`c` AS `c`,`t`.`a` AS `a` from (`t2` join `t`) where `t2`.`c` = `t`.`a` latin1 latin1_swedish_ci
+v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS with t as (select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`b` >= 'c')select `test`.`t2`.`c` AS `c`,`t`.`a` AS `a` from (`test`.`t2` join `t`) where `test`.`t2`.`c` = `t`.`a` latin1 latin1_swedish_ci
select * from v1;
c a
4 4
@@ -588,7 +588,7 @@ with t as (select a, count(*) from t1 where b >= 'c' group by a)
select * from t2,t where t2.c=t.a;
show create view v2;
View Create View character_set_client collation_connection
-v2 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v2` AS with t as (select `t1`.`a` AS `a`,count(0) AS `count(*)` from `t1` where `t1`.`b` >= 'c' group by `t1`.`a`)select `t2`.`c` AS `c`,`t`.`a` AS `a`,`t`.`count(*)` AS `count(*)` from (`t2` join `t`) where `t2`.`c` = `t`.`a` latin1 latin1_swedish_ci
+v2 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v2` AS with t as (select `test`.`t1`.`a` AS `a`,count(0) AS `count(*)` from `test`.`t1` where `test`.`t1`.`b` >= 'c' group by `test`.`t1`.`a`)select `test`.`t2`.`c` AS `c`,`t`.`a` AS `a`,`t`.`count(*)` AS `count(*)` from (`test`.`t2` join `t`) where `test`.`t2`.`c` = `t`.`a` latin1 latin1_swedish_ci
select * from v2;
c a count(*)
4 4 2
@@ -606,7 +606,7 @@ with t(c) as (select a from t1 where b >= 'c')
select * from t r1 where r1.c=4;
show create view v3;
View Create View character_set_client collation_connection
-v3 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v3` AS with t(c) as (select `t1`.`a` AS `c` from `t1` where `t1`.`b` >= 'c')select `r1`.`c` AS `c` from `t` `r1` where `r1`.`c` = 4 latin1 latin1_swedish_ci
+v3 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v3` AS with t(c) as (select `test`.`t1`.`a` AS `c` from `test`.`t1` where `test`.`t1`.`b` >= 'c')select `r1`.`c` AS `c` from `t` `r1` where `r1`.`c` = 4 latin1 latin1_swedish_ci
select * from v3;
c
4
@@ -618,7 +618,7 @@ with t(c) as (select a from t1 where b >= 'c')
select * from t r1, t r2 where r1.c=r2.c and r2.c=4;
show create view v4;
View Create View character_set_client collation_connection
-v4 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v4` AS with t(c) as (select `test`.`t1`.`a` AS `c` from `test`.`t1` where `test`.`t1`.`b` >= 'c')select `r1`.`c` AS `c`,`r2`.`c` AS `d` from (`t` `r1` join (select `test`.`t1`.`a` AS `c` from `test`.`t1` where `test`.`t1`.`b` >= 'c') `r2`) where `r1`.`c` = `r2`.`c` and `r2`.`c` = 4 latin1 latin1_swedish_ci
+v4 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v4` AS with t(c) as (select `test`.`t1`.`a` AS `c` from `test`.`t1` where `test`.`t1`.`b` >= 'c')select `r1`.`c` AS `c`,`r2`.`c` AS `d` from (`t` `r1` join `t` `r2`) where `r1`.`c` = `r2`.`c` and `r2`.`c` = 4 latin1 latin1_swedish_ci
select * from v4;
c d
4 4
@@ -1126,7 +1126,7 @@ NULL UNION RESULT <union4,5> ALL NULL NULL NULL NULL NULL NULL
NULL UNION RESULT <union11,12> ALL NULL NULL NULL NULL NULL NULL
NULL UNION RESULT <union1,6> ALL NULL NULL NULL NULL NULL NULL
Warnings:
-Note 1003 with cte_e as (with cte_o as (with cte_i as (select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` < 7)select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` > 1)select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` < 3 and `test`.`t1`.`a` > 1 and `test`.`t1`.`a` < 7 and `test`.`t1`.`a` > 1 union select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` > 4 and `test`.`t1`.`a` > 1 and `test`.`t1`.`a` < 7 and `test`.`t1`.`a` > 1)select `cte_e1`.`a` AS `a` from `cte_e` `cte_e1` where `cte_e1`.`a` > 1 union select `cte_e2`.`a` AS `a` from (with cte_o as (with cte_i as (select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` < 7)select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` > 1)select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` < 3 and `test`.`t1`.`a` > 1 and `test`.`t1`.`a` < 7 union select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` > 4 and `test`.`t1`.`a` > 1 and `test`.`t1`.`a` < 7) `cte_e2`
+Note 1003 with cte_e as (with cte_o as (with cte_i as (select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` < 7)select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` > 1)select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` < 3 and `test`.`t1`.`a` > 1 and `test`.`t1`.`a` < 7 and `test`.`t1`.`a` > 1 union select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` > 4 and `test`.`t1`.`a` > 1 and `test`.`t1`.`a` < 7 and `test`.`t1`.`a` > 1)select `cte_e1`.`a` AS `a` from `cte_e` `cte_e1` where `cte_e1`.`a` > 1 union select `cte_e2`.`a` AS `a` from `cte_e` `cte_e2`
drop table t1;
#
# MDEV-13753: embedded CTE in a VIEW created in prepared statement
@@ -1349,7 +1349,7 @@ r.r_regionkey in
select r_regionkey from t where r_name <> "ASIA");
show create view v;
View Create View character_set_client collation_connection
-v CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v` AS select `n`.`n_nationkey` AS `n_nationkey`,`n`.`n_name` AS `n_name`,`n`.`n_regionkey` AS `n_regionkey`,`r`.`r_regionkey` AS `r_regionkey`,`r`.`r_name` AS `r_name` from (`nation` `n` join `region` `r`) where `n`.`n_regionkey` = `r`.`r_regionkey` and `r`.`r_regionkey` in (with t as (select `region`.`r_regionkey` AS `r_regionkey`,`region`.`r_name` AS `r_name` from `region` where `region`.`r_regionkey` <= 3)select `t`.`r_regionkey` from `t` where `t`.`r_name` <> 'ASIA') latin1 latin1_swedish_ci
+v CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v` AS select `n`.`n_nationkey` AS `n_nationkey`,`n`.`n_name` AS `n_name`,`n`.`n_regionkey` AS `n_regionkey`,`r`.`r_regionkey` AS `r_regionkey`,`r`.`r_name` AS `r_name` from (`test`.`nation` `n` join `test`.`region` `r`) where `n`.`n_regionkey` = `r`.`r_regionkey` and `r`.`r_regionkey` in (with t as (select `test`.`region`.`r_regionkey` AS `r_regionkey`,`test`.`region`.`r_name` AS `r_name` from `test`.`region` where `test`.`region`.`r_regionkey` <= 3)select `t`.`r_regionkey` from `t` where `t`.`r_name` <> 'ASIA') latin1 latin1_swedish_ci
select * from v;
n_nationkey n_name n_regionkey r_regionkey r_name
0 ALGERIA 0 0 AFRICA
@@ -1690,6 +1690,79 @@ ERROR 3D000: No database selected
DROP TABLE test.t;
connection default;
disconnect con1;
+#
+# MDEV-22781: create view with CTE without default database
+#
+drop database test;
+create database db1;
+create table db1.t1 (a int);
+insert into db1.t1 values (3),(7),(1);
+create view db1.v1 as with t as (select * from db1.t1) select * from t;
+show create view db1.v1;
+View Create View character_set_client collation_connection
+v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `db1`.`v1` AS with t as (select `db1`.`t1`.`a` AS `a` from `db1`.`t1`)select `t`.`a` AS `a` from `t` latin1 latin1_swedish_ci
+select * from db1.v1;
+a
+3
+7
+1
+drop view db1.v1;
+prepare stmt from "
+create view db1.v1 as with t as (select * from db1.t1) select * from t;
+";
+execute stmt;
+deallocate prepare stmt;
+show create view db1.v1;
+View Create View character_set_client collation_connection
+v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `db1`.`v1` AS with t as (select `db1`.`t1`.`a` AS `a` from `db1`.`t1`)select `t`.`a` AS `a` from `t` latin1 latin1_swedish_ci
+select * from db1.v1;
+a
+3
+7
+1
+drop view db1.v1;
+drop table db1.t1;
+drop database db1;
+create database test;
+use test;
+#
+# MDEV-24597: CTE with union used multiple times in query
+#
+with cte(a) as
+(select 1 as d union select 2 as d)
+select a from cte as r1
+union
+select a from cte as r2;
+a
+1
+2
+create table t1 (a int, b int) engine=myisam;
+insert into t1 values
+(3,30), (7,70), (1,10), (7,71), (2,20), (7,72), (3,33), (4,44),
+(5,50), (4,40), (3,33), (4,42), (4,43), (5,51);
+with cte(c) as
+(select a from t1 where b < 30 union select a from t1 where b > 40)
+select * from cte as r1, cte as r2 where r1.c = r2.c;
+c c
+1 1
+2 2
+7 7
+4 4
+5 5
+with cte(a,c) as
+(
+select a, count(*) from t1 group by a having count(*) = 1
+union
+select a, count(*) from t1 group by a having count(*) = 3
+)
+select a, c from cte as r1 where a < 3
+union
+select a, c from cte as r2 where a > 4;
+a c
+1 1
+2 1
+7 3
+drop table t1;
# End of 10.2 tests
#
# MDEV-21673: several references to CTE that uses
diff --git a/mysql-test/main/cte_nonrecursive.test b/mysql-test/main/cte_nonrecursive.test
index 1f5b2a6b70d..49df2bb9ec0 100644
--- a/mysql-test/main/cte_nonrecursive.test
+++ b/mysql-test/main/cte_nonrecursive.test
@@ -1201,6 +1201,66 @@ DROP TABLE test.t;
--connection default
--disconnect con1
+--echo #
+--echo # MDEV-22781: create view with CTE without default database
+--echo #
+
+drop database test;
+create database db1;
+create table db1.t1 (a int);
+insert into db1.t1 values (3),(7),(1);
+
+create view db1.v1 as with t as (select * from db1.t1) select * from t;
+show create view db1.v1;
+select * from db1.v1;
+drop view db1.v1;
+
+prepare stmt from "
+create view db1.v1 as with t as (select * from db1.t1) select * from t;
+";
+execute stmt;
+deallocate prepare stmt;
+show create view db1.v1;
+select * from db1.v1;
+drop view db1.v1;
+
+drop table db1.t1;
+drop database db1;
+
+create database test;
+use test;
+
+--echo #
+--echo # MDEV-24597: CTE with union used multiple times in query
+--echo #
+
+with cte(a) as
+(select 1 as d union select 2 as d)
+select a from cte as r1
+union
+select a from cte as r2;
+
+create table t1 (a int, b int) engine=myisam;
+insert into t1 values
+(3,30), (7,70), (1,10), (7,71), (2,20), (7,72), (3,33), (4,44),
+(5,50), (4,40), (3,33), (4,42), (4,43), (5,51);
+
+with cte(c) as
+(select a from t1 where b < 30 union select a from t1 where b > 40)
+select * from cte as r1, cte as r2 where r1.c = r2.c;
+
+with cte(a,c) as
+(
+ select a, count(*) from t1 group by a having count(*) = 1
+ union
+ select a, count(*) from t1 group by a having count(*) = 3
+)
+select a, c from cte as r1 where a < 3
+union
+select a, c from cte as r2 where a > 4;
+
+drop table t1;
+
--echo # End of 10.2 tests
--echo #
diff --git a/mysql-test/main/cte_nonrecursive_not_embedded.result b/mysql-test/main/cte_nonrecursive_not_embedded.result
new file mode 100644
index 00000000000..c96a1ec2849
--- /dev/null
+++ b/mysql-test/main/cte_nonrecursive_not_embedded.result
@@ -0,0 +1,48 @@
+#
+# MDEV-20751: query using many CTEs with grant_tables enabled
+#
+connection default;
+CREATE DATABASE db;
+USE db;
+CREATE TABLE t1 (a int) ENGINE=MYISAM;
+INSERT INTO t1 VALUES (3), (7), (1);
+CREATE TABLE t2 (a int) ENGINE=MYISAM;
+INSERT INTO t2 VALUES (2), (8), (4);
+CREATE USER 'u1'@'localhost';
+GRANT USAGE ON db.* TO 'u1'@'localhost';
+GRANT SELECT ON db.t1 TO 'u1'@'localhost';
+FLUSH PRIVILEGES;
+connect u1,'localhost',u1,,;
+connection u1;
+USE db;
+WITH
+cte1 AS
+(SELECT a FROM t1),
+cte2 AS
+(SELECT cte1.a FROM t1,cte1 WHERE cte1.a = t1.a),
+cte3 AS
+(SELECT cte2.a FROM t1,cte1,cte2 WHERE cte1.a = t1.a AND t1.a = cte2.a),
+cte4 AS
+(SELECT cte2.a FROM t1,cte2 WHERE cte2.a = t1.a)
+SELECT * FROM cte4 as r;
+a
+3
+7
+1
+WITH
+cte1 AS
+(SELECT a FROM t2),
+cte2 AS
+(SELECT cte1.a FROM t2,cte1 WHERE cte1.a = t2.a),
+cte3 AS
+(SELECT cte2.a FROM t2,cte1,cte2 WHERE cte1.a = t2.a AND t2.a = cte2.a),
+cte4 AS
+(SELECT cte2.a FROM t2,cte2 WHERE cte2.a = t2.a)
+SELECT * FROM cte4 as r;
+ERROR 42000: SELECT command denied to user 'u1'@'localhost' for table 't2'
+disconnect u1;
+connection default;
+DROP USER 'u1'@'localhost';
+DROP DATABASE db;
+USE test;
+# End of 10.2 tests
diff --git a/mysql-test/main/cte_nonrecursive_not_embedded.test b/mysql-test/main/cte_nonrecursive_not_embedded.test
new file mode 100644
index 00000000000..e80baeaf591
--- /dev/null
+++ b/mysql-test/main/cte_nonrecursive_not_embedded.test
@@ -0,0 +1,58 @@
+-- source include/not_embedded.inc
+
+--echo #
+--echo # MDEV-20751: query using many CTEs with grant_tables enabled
+--echo #
+
+--connection default
+
+CREATE DATABASE db;
+USE db;
+
+CREATE TABLE t1 (a int) ENGINE=MYISAM;
+INSERT INTO t1 VALUES (3), (7), (1);
+CREATE TABLE t2 (a int) ENGINE=MYISAM;
+INSERT INTO t2 VALUES (2), (8), (4);
+
+
+CREATE USER 'u1'@'localhost';
+GRANT USAGE ON db.* TO 'u1'@'localhost';
+GRANT SELECT ON db.t1 TO 'u1'@'localhost';
+FLUSH PRIVILEGES;
+
+--connect (u1,'localhost',u1,,)
+--connection u1
+USE db;
+
+WITH
+cte1 AS
+(SELECT a FROM t1),
+cte2 AS
+(SELECT cte1.a FROM t1,cte1 WHERE cte1.a = t1.a),
+cte3 AS
+(SELECT cte2.a FROM t1,cte1,cte2 WHERE cte1.a = t1.a AND t1.a = cte2.a),
+cte4 AS
+(SELECT cte2.a FROM t1,cte2 WHERE cte2.a = t1.a)
+SELECT * FROM cte4 as r;
+
+--error ER_TABLEACCESS_DENIED_ERROR
+WITH
+cte1 AS
+(SELECT a FROM t2),
+cte2 AS
+(SELECT cte1.a FROM t2,cte1 WHERE cte1.a = t2.a),
+cte3 AS
+(SELECT cte2.a FROM t2,cte1,cte2 WHERE cte1.a = t2.a AND t2.a = cte2.a),
+cte4 AS
+(SELECT cte2.a FROM t2,cte2 WHERE cte2.a = t2.a)
+SELECT * FROM cte4 as r;
+
+--disconnect u1
+--connection default
+
+DROP USER 'u1'@'localhost';
+DROP DATABASE db;
+
+USE test;
+
+--echo # End of 10.2 tests
diff --git a/mysql-test/main/cte_recursive.result b/mysql-test/main/cte_recursive.result
index 960bce44e58..6f30de39b46 100644
--- a/mysql-test/main/cte_recursive.result
+++ b/mysql-test/main/cte_recursive.result
@@ -818,7 +818,7 @@ where p.id = a.father or p.id = a.mother
select * from ancestors;
show create view v1;
View Create View character_set_client collation_connection
-v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS with recursive ancestors as (select `folks`.`id` AS `id`,`folks`.`name` AS `name`,`folks`.`dob` AS `dob`,`folks`.`father` AS `father`,`folks`.`mother` AS `mother` from `folks` where `folks`.`name` = 'Me' and `folks`.`dob` = '2000-01-01' union select `p`.`id` AS `id`,`p`.`name` AS `name`,`p`.`dob` AS `dob`,`p`.`father` AS `father`,`p`.`mother` AS `mother` from (`folks` `p` join `ancestors` `a`) where `p`.`id` = `a`.`father` or `p`.`id` = `a`.`mother`)select `ancestors`.`id` AS `id`,`ancestors`.`name` AS `name`,`ancestors`.`dob` AS `dob`,`ancestors`.`father` AS `father`,`ancestors`.`mother` AS `mother` from `ancestors` latin1 latin1_swedish_ci
+v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS with recursive ancestors as (select `test`.`folks`.`id` AS `id`,`test`.`folks`.`name` AS `name`,`test`.`folks`.`dob` AS `dob`,`test`.`folks`.`father` AS `father`,`test`.`folks`.`mother` AS `mother` from `test`.`folks` where `test`.`folks`.`name` = 'Me' and `test`.`folks`.`dob` = '2000-01-01' union select `p`.`id` AS `id`,`p`.`name` AS `name`,`p`.`dob` AS `dob`,`p`.`father` AS `father`,`p`.`mother` AS `mother` from (`test`.`folks` `p` join `ancestors` `a`) where `p`.`id` = `a`.`father` or `p`.`id` = `a`.`mother`)select `ancestors`.`id` AS `id`,`ancestors`.`name` AS `name`,`ancestors`.`dob` AS `dob`,`ancestors`.`father` AS `father`,`ancestors`.`mother` AS `mother` from `ancestors` latin1 latin1_swedish_ci
select * from v1;
id name dob father mother
100 Me 2000-01-01 20 30
@@ -849,7 +849,7 @@ where p.id = ma.mother
select * from ancestors;
show create view v2;
View Create View character_set_client collation_connection
-v2 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v2` AS with recursive ancestors as (select `folks`.`id` AS `id`,`folks`.`name` AS `name`,`folks`.`dob` AS `dob`,`folks`.`father` AS `father`,`folks`.`mother` AS `mother` from `folks` where `folks`.`name` = 'Me' union select `p`.`id` AS `id`,`p`.`name` AS `name`,`p`.`dob` AS `dob`,`p`.`father` AS `father`,`p`.`mother` AS `mother` from (`folks` `p` join `ancestors` `fa`) where `p`.`id` = `fa`.`father` union select `p`.`id` AS `id`,`p`.`name` AS `name`,`p`.`dob` AS `dob`,`p`.`father` AS `father`,`p`.`mother` AS `mother` from (`folks` `p` join `ancestors` `ma`) where `p`.`id` = `ma`.`mother`)select `ancestors`.`id` AS `id`,`ancestors`.`name` AS `name`,`ancestors`.`dob` AS `dob`,`ancestors`.`father` AS `father`,`ancestors`.`mother` AS `mother` from `ancestors` latin1 latin1_swedish_ci
+v2 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v2` AS with recursive ancestors as (select `test`.`folks`.`id` AS `id`,`test`.`folks`.`name` AS `name`,`test`.`folks`.`dob` AS `dob`,`test`.`folks`.`father` AS `father`,`test`.`folks`.`mother` AS `mother` from `test`.`folks` where `test`.`folks`.`name` = 'Me' union select `p`.`id` AS `id`,`p`.`name` AS `name`,`p`.`dob` AS `dob`,`p`.`father` AS `father`,`p`.`mother` AS `mother` from (`test`.`folks` `p` join `ancestors` `fa`) where `p`.`id` = `fa`.`father` union select `p`.`id` AS `id`,`p`.`name` AS `name`,`p`.`dob` AS `dob`,`p`.`father` AS `father`,`p`.`mother` AS `mother` from (`test`.`folks` `p` join `ancestors` `ma`) where `p`.`id` = `ma`.`mother`)select `ancestors`.`id` AS `id`,`ancestors`.`name` AS `name`,`ancestors`.`dob` AS `dob`,`ancestors`.`father` AS `father`,`ancestors`.`mother` AS `mother` from `ancestors` latin1 latin1_swedish_ci
select * from v2;
id name dob father mother
100 Me 2000-01-01 20 30
@@ -1301,7 +1301,7 @@ select ancestors.name, ancestors.dob from ancestors;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY <derived4> ALL NULL NULL NULL NULL 24
4 DERIVED folks ALL NULL NULL NULL NULL 12 Using where
-6 RECURSIVE UNION <derived3> ALL NULL NULL NULL NULL 12
+6 UNION <derived3> ALL NULL NULL NULL NULL 12
5 RECURSIVE UNION <derived4> ALL NULL NULL NULL NULL 24
NULL UNION RESULT <union4,6,5> ALL NULL NULL NULL NULL NULL
3 DERIVED folks ALL NULL NULL NULL NULL 12 Using where
@@ -4029,7 +4029,7 @@ id select_type table type possible_keys key key_len ref rows Extra
3 RECURSIVE UNION t1 ALL NULL NULL NULL NULL 4 Using where
3 RECURSIVE UNION <derived2> ref key0 key0 9 test.t1.c 2
NULL UNION RESULT <union2,3> ALL NULL NULL NULL NULL NULL
-4 RECURSIVE UNION <derived2> ALL NULL NULL NULL NULL 4
+4 UNION <derived2> ALL NULL NULL NULL NULL 4
with recursive r_cte as
( select * from t1 as s
union
@@ -4237,6 +4237,269 @@ a b c
deallocate prepare stmt;
drop table t1;
#
+# MDEV-24019: query with recursive CTE when no default database is set
+#
+drop database test;
+with recursive a as
+(select 1 from dual union select * from a as r)
+select * from a;
+1
+1
+create database db1;
+create table db1.t1 (a int);
+insert into db1.t1 values (3), (7), (1);
+with recursive cte as
+(select * from db1.t1 union select * from (select * from cte) as t)
+select * from cte;
+a
+3
+7
+1
+explain with recursive cte as
+(select * from db1.t1 union select * from (select * from cte) as t)
+select * from cte;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY <derived2> ALL NULL NULL NULL NULL 3
+2 DERIVED t1 ALL NULL NULL NULL NULL 3
+3 RECURSIVE UNION <derived2> ALL NULL NULL NULL NULL 3
+NULL UNION RESULT <union2,3> ALL NULL NULL NULL NULL NULL
+prepare stmt from "with recursive cte as
+(select * from db1.t1 union select * from (select * from cte) as t)
+select * from cte";
+execute stmt;
+a
+3
+7
+1
+execute stmt;
+a
+3
+7
+1
+deallocate prepare stmt;
+drop database db1;
+create database test;
+use test;
+#
+# MDEV-23406: query with mutually recursive CTEs when big_tables=1
+#
+set @save_big_tables=@@big_tables;
+set big_tables=1;
+create table folks(id int, name char(32), dob date, father int, mother int);
+insert into folks values
+(100, 'Me', '2000-01-01', 20, 30),
+(20, 'Dad', '1970-02-02', 10, 9),
+(30, 'Mom', '1975-03-03', 8, 7),
+(10, 'Grandpa Bill', '1940-04-05', null, null),
+(9, 'Grandma Ann', '1941-10-15', null, null),
+(25, 'Uncle Jim', '1968-11-18', 8, 7),
+(98, 'Sister Amy', '2001-06-20', 20, 30),
+(7, 'Grandma Sally', '1943-08-23', null, 6),
+(8, 'Grandpa Ben', '1940-10-21', null, null),
+(6, 'Grandgrandma Martha', '1923-05-17', null, null),
+(67, 'Cousin Eddie', '1992-02-28', 25, 27),
+(27, 'Auntie Melinda', '1971-03-29', null, null);
+with recursive
+ancestor_couples(h_id, h_name, h_dob, h_father, h_mother,
+w_id, w_name, w_dob, w_father, w_mother)
+as
+(
+select h.*, w.*
+from folks h, folks w, coupled_ancestors a
+where a.father = h.id AND a.mother = w.id
+union
+select h.*, w.*
+from folks v, folks h, folks w
+where v.name = 'Me' and
+(v.father = h.id AND v.mother= w.id)
+),
+coupled_ancestors (id, name, dob, father, mother)
+as
+(
+select h_id, h_name, h_dob, h_father, h_mother
+from ancestor_couples
+union
+select w_id, w_name, w_dob, w_father, w_mother
+from ancestor_couples
+)
+select h_name, h_dob, w_name, w_dob
+from ancestor_couples;
+h_name h_dob w_name w_dob
+Dad 1970-02-02 Mom 1975-03-03
+Grandpa Bill 1940-04-05 Grandma Ann 1941-10-15
+Grandpa Ben 1940-10-21 Grandma Sally 1943-08-23
+explain with recursive
+ancestor_couples(h_id, h_name, h_dob, h_father, h_mother,
+w_id, w_name, w_dob, w_father, w_mother)
+as
+(
+select h.*, w.*
+from folks h, folks w, coupled_ancestors a
+where a.father = h.id AND a.mother = w.id
+union
+select h.*, w.*
+from folks v, folks h, folks w
+where v.name = 'Me' and
+(v.father = h.id AND v.mother= w.id)
+),
+coupled_ancestors (id, name, dob, father, mother)
+as
+(
+select h_id, h_name, h_dob, h_father, h_mother
+from ancestor_couples
+union
+select w_id, w_name, w_dob, w_father, w_mother
+from ancestor_couples
+)
+select h_name, h_dob, w_name, w_dob
+from ancestor_couples;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY <derived3> ALL NULL NULL NULL NULL 1728
+4 DERIVED <derived3> ALL NULL NULL NULL NULL 1728
+5 RECURSIVE UNION <derived3> ALL NULL NULL NULL NULL 1728
+NULL UNION RESULT <union4,5> ALL NULL NULL NULL NULL NULL
+3 DERIVED v ALL NULL NULL NULL NULL 12 Using where
+3 DERIVED h ALL NULL NULL NULL NULL 12 Using where; Using join buffer (flat, BNL join)
+3 DERIVED w ALL NULL NULL NULL NULL 12 Using where; Using join buffer (incremental, BNL join)
+2 RECURSIVE UNION <derived4> ALL NULL NULL NULL NULL 2
+2 RECURSIVE UNION h ALL NULL NULL NULL NULL 12 Using where; Using join buffer (flat, BNL join)
+2 RECURSIVE UNION w ALL NULL NULL NULL NULL 12 Using where; Using join buffer (incremental, BNL join)
+NULL UNION RESULT <union3,2> ALL NULL NULL NULL NULL NULL
+prepare stmt from "with recursive
+ancestor_couples(h_id, h_name, h_dob, h_father, h_mother,
+w_id, w_name, w_dob, w_father, w_mother)
+as
+(
+select h.*, w.*
+from folks h, folks w, coupled_ancestors a
+where a.father = h.id AND a.mother = w.id
+union
+select h.*, w.*
+from folks v, folks h, folks w
+where v.name = 'Me' and
+(v.father = h.id AND v.mother= w.id)
+),
+coupled_ancestors (id, name, dob, father, mother)
+as
+(
+select h_id, h_name, h_dob, h_father, h_mother
+from ancestor_couples
+union
+select w_id, w_name, w_dob, w_father, w_mother
+from ancestor_couples
+)
+select h_name, h_dob, w_name, w_dob
+from ancestor_couples";
+execute stmt;
+h_name h_dob w_name w_dob
+Dad 1970-02-02 Mom 1975-03-03
+Grandpa Bill 1940-04-05 Grandma Ann 1941-10-15
+Grandpa Ben 1940-10-21 Grandma Sally 1943-08-23
+execute stmt;
+h_name h_dob w_name w_dob
+Dad 1970-02-02 Mom 1975-03-03
+Grandpa Bill 1940-04-05 Grandma Ann 1941-10-15
+Grandpa Ben 1940-10-21 Grandma Sally 1943-08-23
+deallocate prepare stmt;
+with recursive
+ancestor_couples(h_id, h_name, h_dob, h_father, h_mother,
+w_id, w_name, w_dob, w_father, w_mother)
+as
+(
+select h.*, w.*
+from folks h, folks w, coupled_ancestors a
+where a.father = h.id AND a.mother = w.id
+),
+coupled_ancestors (id, name, dob, father, mother)
+as
+(
+select *
+from folks
+where name = 'Me'
+ union all
+select h_id, h_name, h_dob, h_father, h_mother
+from ancestor_couples
+union all
+select w_id, w_name, w_dob, w_father, w_mother
+from ancestor_couples
+)
+select h_name, h_dob, w_name, w_dob
+from ancestor_couples;
+h_name h_dob w_name w_dob
+Dad 1970-02-02 Mom 1975-03-03
+Grandpa Bill 1940-04-05 Grandma Ann 1941-10-15
+Grandpa Ben 1940-10-21 Grandma Sally 1943-08-23
+explain with recursive
+ancestor_couples(h_id, h_name, h_dob, h_father, h_mother,
+w_id, w_name, w_dob, w_father, w_mother)
+as
+(
+select h.*, w.*
+from folks h, folks w, coupled_ancestors a
+where a.father = h.id AND a.mother = w.id
+),
+coupled_ancestors (id, name, dob, father, mother)
+as
+(
+select *
+from folks
+where name = 'Me'
+ union all
+select h_id, h_name, h_dob, h_father, h_mother
+from ancestor_couples
+union all
+select w_id, w_name, w_dob, w_father, w_mother
+from ancestor_couples
+)
+select h_name, h_dob, w_name, w_dob
+from ancestor_couples;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY <derived2> ALL NULL NULL NULL NULL 2
+3 DERIVED folks ALL NULL NULL NULL NULL 12 Using where
+4 RECURSIVE UNION <derived2> ALL NULL NULL NULL NULL 2
+5 RECURSIVE UNION <derived2> ALL NULL NULL NULL NULL 2
+NULL UNION RESULT <union3,4,5> ALL NULL NULL NULL NULL NULL
+2 DERIVED h ALL NULL NULL NULL NULL 12 Using where
+2 DERIVED <derived3> ref key0 key0 5 test.h.id 2
+2 DERIVED w ALL NULL NULL NULL NULL 12 Using where; Using join buffer (flat, BNL join)
+prepare stmt from "with recursive
+ancestor_couples(h_id, h_name, h_dob, h_father, h_mother,
+w_id, w_name, w_dob, w_father, w_mother)
+as
+(
+select h.*, w.*
+from folks h, folks w, coupled_ancestors a
+where a.father = h.id AND a.mother = w.id
+),
+coupled_ancestors (id, name, dob, father, mother)
+as
+(
+select *
+from folks
+where name = 'Me'
+ union all
+select h_id, h_name, h_dob, h_father, h_mother
+from ancestor_couples
+union all
+select w_id, w_name, w_dob, w_father, w_mother
+from ancestor_couples
+)
+select h_name, h_dob, w_name, w_dob
+from ancestor_couples";
+execute stmt;
+h_name h_dob w_name w_dob
+Dad 1970-02-02 Mom 1975-03-03
+Grandpa Bill 1940-04-05 Grandma Ann 1941-10-15
+Grandpa Ben 1940-10-21 Grandma Sally 1943-08-23
+execute stmt;
+h_name h_dob w_name w_dob
+Dad 1970-02-02 Mom 1975-03-03
+Grandpa Bill 1940-04-05 Grandma Ann 1941-10-15
+Grandpa Ben 1940-10-21 Grandma Sally 1943-08-23
+deallocate prepare stmt;
+drop table folks;
+set big_tables=@save_big_tables;
+#
# End of 10.2 tests
#
#
diff --git a/mysql-test/main/cte_recursive.test b/mysql-test/main/cte_recursive.test
index 58264baac02..c3537e5bd0c 100644
--- a/mysql-test/main/cte_recursive.test
+++ b/mysql-test/main/cte_recursive.test
@@ -2726,6 +2726,135 @@ deallocate prepare stmt;
drop table t1;
--echo #
+--echo # MDEV-24019: query with recursive CTE when no default database is set
+--echo #
+
+drop database test;
+
+let $q=
+with recursive a as
+ (select 1 from dual union select * from a as r)
+select * from a;
+
+eval $q;
+
+create database db1;
+create table db1.t1 (a int);
+insert into db1.t1 values (3), (7), (1);
+
+let $q=
+with recursive cte as
+ (select * from db1.t1 union select * from (select * from cte) as t)
+select * from cte;
+
+eval $q;
+eval explain $q;
+
+eval prepare stmt from "$q";
+execute stmt;
+execute stmt;
+deallocate prepare stmt;
+
+drop database db1;
+
+create database test;
+use test;
+
+--echo #
+--echo # MDEV-23406: query with mutually recursive CTEs when big_tables=1
+--echo #
+
+set @save_big_tables=@@big_tables;
+set big_tables=1;
+
+create table folks(id int, name char(32), dob date, father int, mother int);
+
+insert into folks values
+(100, 'Me', '2000-01-01', 20, 30),
+(20, 'Dad', '1970-02-02', 10, 9),
+(30, 'Mom', '1975-03-03', 8, 7),
+(10, 'Grandpa Bill', '1940-04-05', null, null),
+(9, 'Grandma Ann', '1941-10-15', null, null),
+(25, 'Uncle Jim', '1968-11-18', 8, 7),
+(98, 'Sister Amy', '2001-06-20', 20, 30),
+(7, 'Grandma Sally', '1943-08-23', null, 6),
+(8, 'Grandpa Ben', '1940-10-21', null, null),
+(6, 'Grandgrandma Martha', '1923-05-17', null, null),
+(67, 'Cousin Eddie', '1992-02-28', 25, 27),
+(27, 'Auntie Melinda', '1971-03-29', null, null);
+
+let q=
+with recursive
+ancestor_couples(h_id, h_name, h_dob, h_father, h_mother,
+ w_id, w_name, w_dob, w_father, w_mother)
+as
+(
+ select h.*, w.*
+ from folks h, folks w, coupled_ancestors a
+ where a.father = h.id AND a.mother = w.id
+ union
+ select h.*, w.*
+ from folks v, folks h, folks w
+ where v.name = 'Me' and
+ (v.father = h.id AND v.mother= w.id)
+),
+coupled_ancestors (id, name, dob, father, mother)
+as
+(
+ select h_id, h_name, h_dob, h_father, h_mother
+ from ancestor_couples
+ union
+ select w_id, w_name, w_dob, w_father, w_mother
+ from ancestor_couples
+)
+select h_name, h_dob, w_name, w_dob
+ from ancestor_couples;
+
+eval $q;
+eval explain $q;
+eval prepare stmt from "$q";
+execute stmt;
+execute stmt;
+deallocate prepare stmt;
+
+let $q=
+with recursive
+ancestor_couples(h_id, h_name, h_dob, h_father, h_mother,
+ w_id, w_name, w_dob, w_father, w_mother)
+as
+(
+ select h.*, w.*
+ from folks h, folks w, coupled_ancestors a
+ where a.father = h.id AND a.mother = w.id
+),
+coupled_ancestors (id, name, dob, father, mother)
+as
+(
+ select *
+ from folks
+ where name = 'Me'
+ union all
+ select h_id, h_name, h_dob, h_father, h_mother
+ from ancestor_couples
+ union all
+ select w_id, w_name, w_dob, w_father, w_mother
+ from ancestor_couples
+)
+select h_name, h_dob, w_name, w_dob
+ from ancestor_couples;
+
+eval $q;
+eval explain $q;
+eval prepare stmt from "$q";
+execute stmt;
+execute stmt;
+deallocate prepare stmt;
+
+drop table folks;
+
+set big_tables=@save_big_tables;
+
+--echo #
--echo # End of 10.2 tests
--echo #
diff --git a/mysql-test/main/ctype_utf16.result b/mysql-test/main/ctype_utf16.result
index 3d634bc2005..3643e170376 100644
--- a/mysql-test/main/ctype_utf16.result
+++ b/mysql-test/main/ctype_utf16.result
@@ -1492,6 +1492,8 @@ ab
AE
AE
SET max_sort_length=8;
+Warnings:
+Warning 1292 Truncated incorrect max_sort_length value: '8'
SELECT * FROM t1 ORDER BY s1;
s1
ab
diff --git a/mysql-test/main/ctype_utf16le.result b/mysql-test/main/ctype_utf16le.result
index 00eb559837b..d791d09d7d4 100644
--- a/mysql-test/main/ctype_utf16le.result
+++ b/mysql-test/main/ctype_utf16le.result
@@ -1765,6 +1765,8 @@ ab
AE
AE
SET max_sort_length=8;
+Warnings:
+Warning 1292 Truncated incorrect max_sort_length value: '8'
SELECT * FROM t1 ORDER BY s1;
s1
ab
diff --git a/mysql-test/main/ctype_utf32.result b/mysql-test/main/ctype_utf32.result
index 49531570fd2..da71f6eb59c 100644
--- a/mysql-test/main/ctype_utf32.result
+++ b/mysql-test/main/ctype_utf32.result
@@ -1306,7 +1306,7 @@ create table t1 (a varchar(334) character set utf32 primary key);
ERROR 42000: Specified key was too long; max key length is 1000 bytes
create table t1 (a varchar(333) character set utf32, key(a));
Warnings:
-Warning 1071 Specified key was too long; max key length is 1000 bytes
+Note 1071 Specified key was too long; max key length is 1000 bytes
insert into t1 values (repeat('a',333)), (repeat('b',333));
flush tables;
check table t1;
@@ -1505,6 +1505,8 @@ ab
AE
AE
SET max_sort_length=8;
+Warnings:
+Warning 1292 Truncated incorrect max_sort_length value: '8'
SELECT * FROM t1 ORDER BY s1;
s1
ab
diff --git a/mysql-test/main/ctype_utf8.result b/mysql-test/main/ctype_utf8.result
index 1ed593d10d4..4ca9b1e4cb7 100644
--- a/mysql-test/main/ctype_utf8.result
+++ b/mysql-test/main/ctype_utf8.result
@@ -6757,9 +6757,11 @@ DFFFDFFF9CFF9DFF9EFF
# Checking strnxfrm() with odd length
#
set max_sort_length=9;
+Warnings:
+Warning 1292 Truncated incorrect max_sort_length value: '9'
select @@max_sort_length;
@@max_sort_length
-9
+64
create table t1 (a varchar(128) character set utf8 collate utf8_general_ci);
insert into t1 values ('a'),('b'),('c');
select * from t1 order by a;
diff --git a/mysql-test/main/ctype_utf8mb4.result b/mysql-test/main/ctype_utf8mb4.result
index 691ac51e241..2762873b9c7 100644
--- a/mysql-test/main/ctype_utf8mb4.result
+++ b/mysql-test/main/ctype_utf8mb4.result
@@ -1478,7 +1478,7 @@ a varchar(255) NOT NULL default '',
KEY a (a)
) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE utf8mb4_general_ci;
Warnings:
-Warning 1071 Specified key was too long; max key length is 1000 bytes
+Note 1071 Specified key was too long; max key length is 1000 bytes
insert into t1 values (_utf8mb4 0xe880bd);
insert into t1 values (_utf8mb4 0x5b);
select hex(a) from t1;
@@ -1526,7 +1526,7 @@ Warnings:
Note 1051 Unknown table 'test.t1'
CREATE TABLE t1(a VARCHAR(255), KEY(a)) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4;
Warnings:
-Warning 1071 Specified key was too long; max key length is 1000 bytes
+Note 1071 Specified key was too long; max key length is 1000 bytes
INSERT INTO t1 VALUES('uuABCDEFGHIGKLMNOPRSTUVWXYZ̈bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb');
INSERT INTO t1 VALUES('uu');
check table t1;
@@ -2371,10 +2371,10 @@ drop table t1;
#
# Check strnxfrm() with odd length
#
-set max_sort_length=9;
+set max_sort_length=65;
select @@max_sort_length;
@@max_sort_length
-9
+65
create table t1 (a varchar(128) character set utf8mb4 collate utf8mb4_general_ci);
insert into t1 values ('a'),('b'),('c');
select * from t1 order by a;
@@ -2726,7 +2726,7 @@ DEFAULT CHARACTER SET utf8,
MODIFY subject varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci,
MODIFY p varchar(255) CHARACTER SET utf8;
Warnings:
-Warning 1071 Specified key was too long; max key length is 1000 bytes
+Note 1071 Specified key was too long; max key length is 1000 bytes
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
diff --git a/mysql-test/main/ctype_utf8mb4.test b/mysql-test/main/ctype_utf8mb4.test
index 532729dafde..8fbbee872ce 100644
--- a/mysql-test/main/ctype_utf8mb4.test
+++ b/mysql-test/main/ctype_utf8mb4.test
@@ -1520,7 +1520,7 @@ drop table t1;
--echo #
--echo # Check strnxfrm() with odd length
--echo #
-set max_sort_length=9;
+set max_sort_length=65;
select @@max_sort_length;
create table t1 (a varchar(128) character set utf8mb4 collate utf8mb4_general_ci);
insert into t1 values ('a'),('b'),('c');
diff --git a/mysql-test/main/ctype_utf8mb4_heap.result b/mysql-test/main/ctype_utf8mb4_heap.result
index 4aef2d8cb66..9eb7d48370f 100644
--- a/mysql-test/main/ctype_utf8mb4_heap.result
+++ b/mysql-test/main/ctype_utf8mb4_heap.result
@@ -2203,10 +2203,10 @@ drop table t1;
#
# Check strnxfrm() with odd length
#
-set max_sort_length=9;
+set max_sort_length=65;
select @@max_sort_length;
@@max_sort_length
-9
+65
create table t1 (a varchar(128) character set utf8mb4 collate utf8mb4_general_ci) engine heap;
insert into t1 values ('a'),('b'),('c');
select * from t1 order by a;
diff --git a/mysql-test/main/ctype_utf8mb4_innodb.result b/mysql-test/main/ctype_utf8mb4_innodb.result
index 3c7d0ba2fb7..fc2a368f3f0 100644
--- a/mysql-test/main/ctype_utf8mb4_innodb.result
+++ b/mysql-test/main/ctype_utf8mb4_innodb.result
@@ -2329,10 +2329,10 @@ drop table t1;
#
# Check strnxfrm() with odd length
#
-set max_sort_length=9;
+set max_sort_length=65;
select @@max_sort_length;
@@max_sort_length
-9
+65
create table t1 (a varchar(128) character set utf8mb4 collate utf8mb4_general_ci) engine InnoDB;
insert into t1 values ('a'),('b'),('c');
select * from t1 order by a;
diff --git a/mysql-test/main/ctype_utf8mb4_myisam.result b/mysql-test/main/ctype_utf8mb4_myisam.result
index fd8d7adf3a0..ee2bd4431fc 100644
--- a/mysql-test/main/ctype_utf8mb4_myisam.result
+++ b/mysql-test/main/ctype_utf8mb4_myisam.result
@@ -1443,7 +1443,7 @@ a varchar(255) NOT NULL default '',
KEY a (a)
) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE utf8mb4_general_ci;
Warnings:
-Warning 1071 Specified key was too long; max key length is 1000 bytes
+Note 1071 Specified key was too long; max key length is 1000 bytes
insert into t1 values (_utf8mb4 0xe880bd);
insert into t1 values (_utf8mb4 0x5b);
select hex(a) from t1;
@@ -1491,7 +1491,7 @@ Warnings:
Note 1051 Unknown table 'test.t1'
CREATE TABLE t1(a VARCHAR(255), KEY(a)) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4;
Warnings:
-Warning 1071 Specified key was too long; max key length is 1000 bytes
+Note 1071 Specified key was too long; max key length is 1000 bytes
INSERT INTO t1 VALUES('uuABCDEFGHIGKLMNOPRSTUVWXYZ̈bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb');
INSERT INTO t1 VALUES('uu');
check table t1;
@@ -2336,10 +2336,10 @@ drop table t1;
#
# Check strnxfrm() with odd length
#
-set max_sort_length=9;
+set max_sort_length=65;
select @@max_sort_length;
@@max_sort_length
-9
+65
create table t1 (a varchar(128) character set utf8mb4 collate utf8mb4_general_ci) engine MyISAM;
insert into t1 values ('a'),('b'),('c');
select * from t1 order by a;
diff --git a/mysql-test/main/deadlock_ftwrl.result b/mysql-test/main/deadlock_ftwrl.result
new file mode 100644
index 00000000000..95eed70f664
--- /dev/null
+++ b/mysql-test/main/deadlock_ftwrl.result
@@ -0,0 +1,21 @@
+CREATE TABLE t1(a INT);
+SELECT GET_LOCK("l1", 0);
+GET_LOCK("l1", 0)
+1
+connect con1,localhost,root,,;
+LOCK TABLES t1 WRITE;
+connection default;
+set debug_sync='mdl_acquire_lock_wait SIGNAL ftwrl';
+FLUSH TABLES WITH READ LOCK;
+connection con1;
+set debug_sync='now WAIT_FOR ftwrl';
+SELECT GET_LOCK("l1", 1000);
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+disconnect con1;
+connection default;
+SELECT RELEASE_LOCK("l1");
+RELEASE_LOCK("l1")
+1
+UNLOCK TABLES;
+DROP TABLE t1;
+set debug_sync='reset';
diff --git a/mysql-test/main/deadlock_ftwrl.test b/mysql-test/main/deadlock_ftwrl.test
new file mode 100644
index 00000000000..fc943bcf953
--- /dev/null
+++ b/mysql-test/main/deadlock_ftwrl.test
@@ -0,0 +1,36 @@
+# MDEV-20946 Hard FTWRL deadlock under user level locks
+#
+# Deadlock detector should resolve conflicts between FTWRL and user locks.
+
+--source include/have_debug_sync.inc
+--source include/count_sessions.inc
+
+CREATE TABLE t1(a INT);
+SELECT GET_LOCK("l1", 0);
+
+connect(con1,localhost,root,,);
+LOCK TABLES t1 WRITE;
+
+connection default;
+set debug_sync='mdl_acquire_lock_wait SIGNAL ftwrl';
+send FLUSH TABLES WITH READ LOCK;
+# At this point "default" is waiting for tables to be unlocked from
+# LOCK TABLES WRITE issued by "con1".
+
+connection con1;
+set debug_sync='now WAIT_FOR ftwrl';
+# The lock in the following GET_LOCK cannot be acquired since "default" holds
+# a lock on "l1" and is waiting in FLUSH TABLES for con1.
+--error ER_LOCK_DEADLOCK
+SELECT GET_LOCK("l1", 1000);
+disconnect con1; # Performs an implicit UNLOCK TABLES.
+
+connection default;
+reap;
+SELECT RELEASE_LOCK("l1");
+UNLOCK TABLES;
+DROP TABLE t1;
+
+set debug_sync='reset';
+
+--source include/wait_until_count_sessions.inc
diff --git a/mysql-test/main/default.result b/mysql-test/main/default.result
index ca65c66ea41..0d2c2e6acbc 100644
--- a/mysql-test/main/default.result
+++ b/mysql-test/main/default.result
@@ -3388,3 +3388,18 @@ ALTER TABLE t1 ADD b CHAR(255) DEFAULT `aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
ERROR 42S22: Unknown column 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' in 'DEFAULT'
DROP TABLE t1;
# end of 10.2 test
+#
+# MDEV-22703 DEFAULT() on a BLOB column can overwrite the default
+# record, which can cause crashes when accessing already released
+# memory.
+#
+CREATE TEMPORARY TABLE t1 (h POINT DEFAULT ST_GEOMFROMTEXT('Point(1 1)')) ENGINE=InnoDB;
+INSERT INTO t1 () VALUES (),();
+ALTER TABLE t1 FORCE;
+SELECT DEFAULT(h) FROM t1;
+SELECT length(DEFAULT(h)) FROM t1;
+length(DEFAULT(h))
+25
+25
+INSERT INTO t1 () VALUES ();
+drop table t1;
diff --git a/mysql-test/main/default.test b/mysql-test/main/default.test
index 27e38eeeb49..c0561deac67 100644
--- a/mysql-test/main/default.test
+++ b/mysql-test/main/default.test
@@ -1,3 +1,5 @@
+--source include/have_innodb.inc
+
#
# test of already fixed bugs
#
@@ -2107,5 +2109,20 @@ CREATE OR REPLACE TABLE t1(i int);
ALTER TABLE t1 ADD b CHAR(255) DEFAULT `aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa`;
DROP TABLE t1;
-
--echo # end of 10.2 test
+
+--echo #
+--echo # MDEV-22703 DEFAULT() on a BLOB column can overwrite the default
+--echo # record, which can cause crashes when accessing already released
+--echo # memory.
+--echo #
+
+CREATE TEMPORARY TABLE t1 (h POINT DEFAULT ST_GEOMFROMTEXT('Point(1 1)')) ENGINE=InnoDB;
+INSERT INTO t1 () VALUES (),();
+ALTER TABLE t1 FORCE;
+--disable_result_log
+SELECT DEFAULT(h) FROM t1;
+--enable_result_log
+SELECT length(DEFAULT(h)) FROM t1;
+INSERT INTO t1 () VALUES ();
+drop table t1;
diff --git a/mysql-test/main/derived_cond_pushdown.result b/mysql-test/main/derived_cond_pushdown.result
index 0f2e6f0e2a2..7644e65a868 100644
--- a/mysql-test/main/derived_cond_pushdown.result
+++ b/mysql-test/main/derived_cond_pushdown.result
@@ -17058,6 +17058,290 @@ id
2
3
DROP TABLE t;
+#
+# MDEV-23804: Server crashes in st_select_lex::collect_grouping_fields_for_derived
+#
+CREATE TABLE t1 (a INT);
+INSERT INTO t1 VALUES (3),(4);
+CREATE VIEW v1 AS SELECT a FROM t1 UNION VALUES (3),(4);
+ANALYZE FORMAT=JSON SELECT * from v1 WHERE a=3;
+ANALYZE
+{
+ "query_block": {
+ "select_id": 1,
+ "r_loops": 1,
+ "r_total_time_ms": "REPLACED",
+ "table": {
+ "table_name": "<derived2>",
+ "access_type": "ALL",
+ "r_loops": 1,
+ "rows": 4,
+ "r_rows": 2,
+ "r_total_time_ms": "REPLACED",
+ "filtered": 100,
+ "r_filtered": 50,
+ "attached_condition": "v1.a = 3",
+ "materialized": {
+ "query_block": {
+ "union_result": {
+ "table_name": "<union2,3>",
+ "access_type": "ALL",
+ "r_loops": 1,
+ "r_rows": 2,
+ "query_specifications": [
+ {
+ "query_block": {
+ "select_id": 2,
+ "r_loops": 1,
+ "r_total_time_ms": "REPLACED",
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "r_loops": 1,
+ "rows": 2,
+ "r_rows": 2,
+ "r_total_time_ms": "REPLACED",
+ "filtered": 100,
+ "r_filtered": 50,
+ "attached_condition": "t1.a = 3"
+ }
+ }
+ },
+ {
+ "query_block": {
+ "select_id": 3,
+ "operation": "UNION",
+ "table": {
+ "message": "No tables used"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+}
+SELECT * from v1 WHERE a=3;
+a
+3
+DROP VIEW v1;
+DROP TABLE t1;
+#
+# MDEV-25128: Split optimization for join with materialized semi-join
+#
+create table t1 (id int, a int, index (a), index (id, a)) engine=myisam;
+insert into t1 values
+(17,1),(17,3010),(17,3013),(17,3053),(21,2446),(21,2467),(21,2);
+create table t2 (a int) engine=myisam;
+insert into t2 values (1),(2),(3);
+create table t3 (id int) engine=myisam;
+insert into t3 values (1),(2);
+analyze table t1,t2,t3;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+test.t2 analyze status Engine-independent statistics collected
+test.t2 analyze status OK
+test.t3 analyze status Engine-independent statistics collected
+test.t3 analyze status OK
+set optimizer_switch="split_materialized=off";
+select * from t1, (select a from t1 cp2 group by a) dt, t3
+where dt.a = t1.a and t1.a = t3.id and t1.a in (select a from t2);
+id a a id
+17 1 1 1
+21 2 2 2
+explain select * from t1, (select a from t1 cp2 group by a) dt, t3
+where dt.a = t1.a and t1.a = t3.id and t1.a in (select a from t2);
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t3 ALL NULL NULL NULL NULL 2 Using where
+1 PRIMARY t1 ref a a 5 test.t3.id 1
+1 PRIMARY <subquery3> eq_ref distinct_key distinct_key 4 func 1
+1 PRIMARY <derived2> ref key0 key0 5 test.t3.id 2
+3 MATERIALIZED t2 ALL NULL NULL NULL NULL 3
+2 DERIVED cp2 index NULL a 5 NULL 7 Using index
+explain format=json select * from t1, (select a from t1 cp2 group by a) dt, t3
+where dt.a = t1.a and t1.a = t3.id and t1.a in (select a from t2);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t3",
+ "access_type": "ALL",
+ "rows": 2,
+ "filtered": 100,
+ "attached_condition": "t3.`id` is not null and t3.`id` is not null"
+ },
+ "table": {
+ "table_name": "t1",
+ "access_type": "ref",
+ "possible_keys": ["a"],
+ "key": "a",
+ "key_length": "5",
+ "used_key_parts": ["a"],
+ "ref": ["test.t3.id"],
+ "rows": 1,
+ "filtered": 100
+ },
+ "table": {
+ "table_name": "<subquery3>",
+ "access_type": "eq_ref",
+ "possible_keys": ["distinct_key"],
+ "key": "distinct_key",
+ "key_length": "4",
+ "used_key_parts": ["a"],
+ "ref": ["func"],
+ "rows": 1,
+ "filtered": 100,
+ "materialized": {
+ "unique": 1,
+ "query_block": {
+ "select_id": 3,
+ "table": {
+ "table_name": "t2",
+ "access_type": "ALL",
+ "rows": 3,
+ "filtered": 100
+ }
+ }
+ }
+ },
+ "table": {
+ "table_name": "<derived2>",
+ "access_type": "ref",
+ "possible_keys": ["key0"],
+ "key": "key0",
+ "key_length": "5",
+ "used_key_parts": ["a"],
+ "ref": ["test.t3.id"],
+ "rows": 2,
+ "filtered": 100,
+ "materialized": {
+ "query_block": {
+ "select_id": 2,
+ "table": {
+ "table_name": "cp2",
+ "access_type": "index",
+ "key": "a",
+ "key_length": "5",
+ "used_key_parts": ["a"],
+ "rows": 7,
+ "filtered": 100,
+ "using_index": true
+ }
+ }
+ }
+ }
+ }
+}
+set optimizer_switch="split_materialized=default";
+select * from t1, (select a from t1 cp2 group by a) dt, t3
+where dt.a = t1.a and t1.a = t3.id and t1.a in (select a from t2);
+id a a id
+17 1 1 1
+21 2 2 2
+explain select * from t1, (select a from t1 cp2 group by a) dt, t3
+where dt.a = t1.a and t1.a = t3.id and t1.a in (select a from t2);
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t3 ALL NULL NULL NULL NULL 2 Using where
+1 PRIMARY t1 ref a a 5 test.t3.id 1
+1 PRIMARY <subquery3> eq_ref distinct_key distinct_key 4 func 1
+1 PRIMARY <derived2> ref key0 key0 5 test.t3.id 2
+3 MATERIALIZED t2 ALL NULL NULL NULL NULL 3
+2 LATERAL DERIVED cp2 ref a a 5 test.t1.a 1 Using index
+explain format=json select * from t1, (select a from t1 cp2 group by a) dt, t3
+where dt.a = t1.a and t1.a = t3.id and t1.a in (select a from t2);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t3",
+ "access_type": "ALL",
+ "rows": 2,
+ "filtered": 100,
+ "attached_condition": "t3.`id` is not null and t3.`id` is not null"
+ },
+ "table": {
+ "table_name": "t1",
+ "access_type": "ref",
+ "possible_keys": ["a"],
+ "key": "a",
+ "key_length": "5",
+ "used_key_parts": ["a"],
+ "ref": ["test.t3.id"],
+ "rows": 1,
+ "filtered": 100
+ },
+ "table": {
+ "table_name": "<subquery3>",
+ "access_type": "eq_ref",
+ "possible_keys": ["distinct_key"],
+ "key": "distinct_key",
+ "key_length": "4",
+ "used_key_parts": ["a"],
+ "ref": ["func"],
+ "rows": 1,
+ "filtered": 100,
+ "materialized": {
+ "unique": 1,
+ "query_block": {
+ "select_id": 3,
+ "table": {
+ "table_name": "t2",
+ "access_type": "ALL",
+ "rows": 3,
+ "filtered": 100
+ }
+ }
+ }
+ },
+ "table": {
+ "table_name": "<derived2>",
+ "access_type": "ref",
+ "possible_keys": ["key0"],
+ "key": "key0",
+ "key_length": "5",
+ "used_key_parts": ["a"],
+ "ref": ["test.t3.id"],
+ "rows": 2,
+ "filtered": 100,
+ "materialized": {
+ "lateral": 1,
+ "query_block": {
+ "select_id": 2,
+ "outer_ref_condition": "t1.a is not null",
+ "table": {
+ "table_name": "cp2",
+ "access_type": "ref",
+ "possible_keys": ["a"],
+ "key": "a",
+ "key_length": "5",
+ "used_key_parts": ["a"],
+ "ref": ["test.t1.a"],
+ "rows": 1,
+ "filtered": 100,
+ "using_index": true
+ }
+ }
+ }
+ }
+ }
+}
+prepare stmt from "select * from t1, (select a from t1 cp2 group by a) dt, t3
+where dt.a = t1.a and t1.a = t3.id and t1.a in (select a from t2)";
+execute stmt;
+id a a id
+17 1 1 1
+21 2 2 2
+execute stmt;
+id a a id
+17 1 1 1
+21 2 2 2
+deallocate prepare stmt;
+drop table t1,t2,t3;
# End of 10.3 tests
#
# MDEV-18679: materialized view with SELECT S containing materialized
diff --git a/mysql-test/main/derived_cond_pushdown.test b/mysql-test/main/derived_cond_pushdown.test
index 7667cd44ed2..a880712c8bd 100644
--- a/mysql-test/main/derived_cond_pushdown.test
+++ b/mysql-test/main/derived_cond_pushdown.test
@@ -3466,6 +3466,56 @@ eval set statement optimizer_switch='split_materialized=on' for $q;
DROP TABLE t;
+--echo #
+--echo # MDEV-23804: Server crashes in st_select_lex::collect_grouping_fields_for_derived
+--echo #
+
+CREATE TABLE t1 (a INT);
+INSERT INTO t1 VALUES (3),(4);
+CREATE VIEW v1 AS SELECT a FROM t1 UNION VALUES (3),(4);
+--source include/analyze-format.inc
+ANALYZE FORMAT=JSON SELECT * from v1 WHERE a=3;
+SELECT * from v1 WHERE a=3;
+DROP VIEW v1;
+DROP TABLE t1;
+
+--echo #
+--echo # MDEV-25128: Split optimization for join with materialized semi-join
+--echo #
+
+create table t1 (id int, a int, index (a), index (id, a)) engine=myisam;
+insert into t1 values
+(17,1),(17,3010),(17,3013),(17,3053),(21,2446),(21,2467),(21,2);
+
+create table t2 (a int) engine=myisam;
+insert into t2 values (1),(2),(3);
+
+create table t3 (id int) engine=myisam;
+insert into t3 values (1),(2);
+
+analyze table t1,t2,t3;
+
+let $q=
+select * from t1, (select a from t1 cp2 group by a) dt, t3
+ where dt.a = t1.a and t1.a = t3.id and t1.a in (select a from t2);
+
+set optimizer_switch="split_materialized=off";
+eval $q;
+eval explain $q;
+eval explain format=json $q;
+
+set optimizer_switch="split_materialized=default";
+eval $q;
+eval explain $q;
+eval explain format=json $q;
+
+eval prepare stmt from "$q";
+execute stmt;
+execute stmt;
+deallocate prepare stmt;
+
+drop table t1,t2,t3;
+
--echo # End of 10.3 tests
--echo #
diff --git a/mysql-test/main/derived_opt.result b/mysql-test/main/derived_opt.result
index c30f56d9925..907c97e92d4 100644
--- a/mysql-test/main/derived_opt.result
+++ b/mysql-test/main/derived_opt.result
@@ -540,4 +540,31 @@ id select_type table type possible_keys key key_len ref rows Extra
set join_cache_level=default;
set optimizer_switch= @save_optimizer_switch;
DROP TABLE t1,t2;
+set @save_optimizer_switch= @@optimizer_switch;
+set optimizer_switch="derived_merge=on";
+CREATE TABLE t1 (id int, d2 datetime, id1 int) ;
+insert into t1 values (1,'2020-01-01 10:10:10',1),(2,'2020-01-01 10:10:10',2),(3,'2020-01-01 10:10:10',3);
+CREATE TABLE t2 (id int, d1 datetime, id1 int) ;
+insert into t2 values (1,'2020-01-01 10:10:10',1),(2,'2020-01-01 10:10:10',2),(3,'2020-01-01 10:10:10',2);
+prepare stmt from "
+SELECT * from
+ (SELECT min(d2) AS d2, min(d1) AS d1 FROM
+ (SELECT t1.d2 AS d2, (SELECT t2.d1
+ FROM t2 WHERE t1.id1 = t2.id1
+ ORDER BY t2.id DESC LIMIT 1) AS d1
+ FROM t1
+ ) dt2
+ ) ca
+ ORDER BY ca.d2;";
+execute stmt;
+d2 d1
+2020-01-01 10:10:10 2020-01-01 10:10:10
+execute stmt;
+d2 d1
+2020-01-01 10:10:10 2020-01-01 10:10:10
+set optimizer_switch= @save_optimizer_switch;
+DROP TABLE t1, t2;
+#
+# End of 10.3 tests
+#
set optimizer_switch=@exit_optimizer_switch;
diff --git a/mysql-test/main/derived_opt.test b/mysql-test/main/derived_opt.test
index eccf4c13020..dee424559ee 100644
--- a/mysql-test/main/derived_opt.test
+++ b/mysql-test/main/derived_opt.test
@@ -406,5 +406,38 @@ set optimizer_switch= @save_optimizer_switch;
DROP TABLE t1,t2;
+#
+# MDEV-25182: Complex query in stored procedure corrupts results
+#
+set @save_optimizer_switch= @@optimizer_switch;
+set optimizer_switch="derived_merge=on";
+
+CREATE TABLE t1 (id int, d2 datetime, id1 int) ;
+insert into t1 values (1,'2020-01-01 10:10:10',1),(2,'2020-01-01 10:10:10',2),(3,'2020-01-01 10:10:10',3);
+
+CREATE TABLE t2 (id int, d1 datetime, id1 int) ;
+insert into t2 values (1,'2020-01-01 10:10:10',1),(2,'2020-01-01 10:10:10',2),(3,'2020-01-01 10:10:10',2);
+
+prepare stmt from "
+SELECT * from
+ (SELECT min(d2) AS d2, min(d1) AS d1 FROM
+ (SELECT t1.d2 AS d2, (SELECT t2.d1
+ FROM t2 WHERE t1.id1 = t2.id1
+ ORDER BY t2.id DESC LIMIT 1) AS d1
+ FROM t1
+ ) dt2
+ ) ca
+ ORDER BY ca.d2;";
+
+execute stmt;
+execute stmt;
+
+set optimizer_switch= @save_optimizer_switch;
+DROP TABLE t1, t2;
+
+--echo #
+--echo # End of 10.3 tests
+--echo #
+
# The following command must be the last one the file
set optimizer_switch=@exit_optimizer_switch;
diff --git a/mysql-test/main/empty_string_literal.result b/mysql-test/main/empty_string_literal.result
index 2ca491a7dd8..bbcf27cf993 100644
--- a/mysql-test/main/empty_string_literal.result
+++ b/mysql-test/main/empty_string_literal.result
@@ -179,3 +179,32 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
Note 1003 select NULL AS `NULL`
+#
+# MDEV-20763 Table corruption or Assertion `btr_validate_index(index, 0, false)' failed in row_upd_sec_index_entry with virtual column and EMPTY_STRING_IS_NULL SQL mode
+#
+create table t1 (a int, b binary(1) generated always as (''), key(a,b));
+insert into t1 (a) values (1);
+set sql_mode= default;
+flush tables;
+update t1 set a = 2;
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` binary(1) GENERATED ALWAYS AS (NULL) VIRTUAL,
+ KEY `a` (`a`,`b`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+drop table t1;
+create table t1 (a int, b binary(1) generated always as (''), key(a,b));
+insert into t1 (a) values (1);
+set sql_mode= 'empty_string_is_null';
+flush tables;
+update t1 set a = 2;
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` binary(1) GENERATED ALWAYS AS ('') VIRTUAL,
+ KEY `a` (`a`,`b`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+drop table t1;
diff --git a/mysql-test/main/empty_string_literal.test b/mysql-test/main/empty_string_literal.test
index 71e98d872bb..9174a7714a2 100644
--- a/mysql-test/main/empty_string_literal.test
+++ b/mysql-test/main/empty_string_literal.test
@@ -6,3 +6,22 @@ USE test;
set @mode='EMPTY_STRING_IS_NULL';
--source include/empty_string_literal.inc
+
+--echo #
+--echo # MDEV-20763 Table corruption or Assertion `btr_validate_index(index, 0, false)' failed in row_upd_sec_index_entry with virtual column and EMPTY_STRING_IS_NULL SQL mode
+--echo #
+create table t1 (a int, b binary(1) generated always as (''), key(a,b));
+insert into t1 (a) values (1);
+set sql_mode= default;
+flush tables;
+update t1 set a = 2;
+show create table t1;
+drop table t1;
+
+create table t1 (a int, b binary(1) generated always as (''), key(a,b));
+insert into t1 (a) values (1);
+set sql_mode= 'empty_string_is_null';
+flush tables;
+update t1 set a = 2;
+show create table t1;
+drop table t1;
diff --git a/mysql-test/main/flush_and_binlog.result b/mysql-test/main/flush_and_binlog.result
new file mode 100644
index 00000000000..a1d73c6590f
--- /dev/null
+++ b/mysql-test/main/flush_and_binlog.result
@@ -0,0 +1,33 @@
+#
+# MDEV-23843 Assertions in Diagnostics_area upon table operations under
+# FTWRL
+#
+CREATE TABLE t1 (a INT);
+FLUSH TABLES WITH READ LOCK;
+connect con1,localhost,root,,;
+SET lock_wait_timeout= 1;
+OPTIMIZE TABLE t1;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+disconnect con1;
+connection default;
+UNLOCK TABLES;
+DROP TABLE t1;
+FLUSH TABLES WITH READ LOCK;
+connect con1,localhost,root,,test;
+SET lock_wait_timeout= 1;
+FLUSH TABLES;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+connection default;
+disconnect con1;
+unlock tables;
+# Second test from MDEV-23843
+CREATE TABLE t (a INT);
+FLUSH TABLES WITH READ LOCK;
+connect con1,localhost,root,,;
+SET lock_wait_timeout= 1;
+ANALYZE TABLE t;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+disconnect con1;
+connection default;
+UNLOCK TABLES;
+DROP TABLE t;
diff --git a/mysql-test/main/flush_and_binlog.test b/mysql-test/main/flush_and_binlog.test
new file mode 100644
index 00000000000..a28d8e365dd
--- /dev/null
+++ b/mysql-test/main/flush_and_binlog.test
@@ -0,0 +1,43 @@
+--source include/have_log_bin.inc
+
+--echo #
+--echo # MDEV-23843 Assertions in Diagnostics_area upon table operations under
+--echo # FTWRL
+--echo #
+
+CREATE TABLE t1 (a INT);
+FLUSH TABLES WITH READ LOCK;
+--connect (con1,localhost,root,,)
+SET lock_wait_timeout= 1;
+--error ER_LOCK_WAIT_TIMEOUT
+OPTIMIZE TABLE t1;
+# Cleanup
+--disconnect con1
+--connection default
+UNLOCK TABLES;
+DROP TABLE t1;
+#
+# Second test case from MDEV-23843
+#
+FLUSH TABLES WITH READ LOCK;
+--connect (con1,localhost,root,,test)
+SET lock_wait_timeout= 1;
+--error ER_LOCK_WAIT_TIMEOUT
+FLUSH TABLES;
+--connection default
+--disconnect con1
+unlock tables;
+
+--echo # Second test from MDEV-23843
+
+CREATE TABLE t (a INT);
+FLUSH TABLES WITH READ LOCK;
+--connect (con1,localhost,root,,)
+SET lock_wait_timeout= 1;
+--error ER_LOCK_WAIT_TIMEOUT
+ANALYZE TABLE t;
+# Cleanup
+--disconnect con1
+--connection default
+UNLOCK TABLES;
+DROP TABLE t;
diff --git a/mysql-test/main/func_gconcat.result b/mysql-test/main/func_gconcat.result
index 94c24a63bb5..a7517e98134 100644
--- a/mysql-test/main/func_gconcat.result
+++ b/mysql-test/main/func_gconcat.result
@@ -1280,6 +1280,18 @@ Name_exp_1
DROP VIEW v1;
DROP TABLE t1;
#
+# MDEV-4677 GROUP_CONCAT not showing any output with group_concat_max_len >= 4Gb
+#
+set group_concat_max_len=1024*1024*1024*4;
+Warnings:
+Warning 1292 Truncated incorrect group_concat_max_len value: '4294967296'
+create table t1 (i int, j int);
+insert into t1 values (1,1),(1,2);
+select i, group_concat(j) from t1 group by i;
+i group_concat(j)
+1 1,2
+drop table t1;
+#
# End of 10.2 tests
#
#
diff --git a/mysql-test/main/func_gconcat.test b/mysql-test/main/func_gconcat.test
index 2ab856e0edd..3c21aa04ffc 100644
--- a/mysql-test/main/func_gconcat.test
+++ b/mysql-test/main/func_gconcat.test
@@ -939,6 +939,16 @@ DROP VIEW v1;
DROP TABLE t1;
+
+--echo #
+--echo # MDEV-4677 GROUP_CONCAT not showing any output with group_concat_max_len >= 4Gb
+--echo #
+set group_concat_max_len=1024*1024*1024*4;
+create table t1 (i int, j int);
+insert into t1 values (1,1),(1,2);
+select i, group_concat(j) from t1 group by i;
+drop table t1;
+
--echo #
--echo # End of 10.2 tests
--echo #
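
A minimal sketch, not taken from the patch: the MDEV-4677 test above relies on group_concat_max_len being clamped when set out of range; assuming the session variable can be read back as usual, the value actually in effect can be checked directly.

set group_concat_max_len=1024*1024*1024*4;  # clamped to the supported maximum, with a truncation warning
select @@group_concat_max_len;              # shows the effective (clamped) value
set group_concat_max_len=default;
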
diff --git a/mysql-test/main/func_group.result b/mysql-test/main/func_group.result
index 9311a556191..071c155cd6b 100644
--- a/mysql-test/main/func_group.result
+++ b/mysql-test/main/func_group.result
@@ -606,7 +606,7 @@ select min(a1) from t1 where a1 > 'KKK' or a1 < 'XXX';
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index PRIMARY PRIMARY 3 NULL 15 Using where; Using index
explain
-select min(a1) from t1 where a1 != 'KKK';
+select min(a1) from t1 where (a1 < 'KKK' or a1 > 'KKK');
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index PRIMARY PRIMARY 3 NULL 15 Using where; Using index
explain
@@ -2460,7 +2460,38 @@ count(*)+sleep(0)
2
drop table t1;
#
-# Start of 10.3 tests
+# MDEV-25112: MIN/MAX optimization for query containing BETWEEN in WHERE
+#
+create table t1 (a int) engine=myisam;
+insert into t1 values (267), (273), (287), (303), (308);
+select max(a) from t1 where a < 303 and (a between 267 AND 287);
+max(a)
+287
+explain select max(a) from t1 where a < 303 and (a between 267 AND 287);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 5 Using where
+select min(a) from t1 where a > 267 and (a between 273 AND 303);
+min(a)
+273
+explain select min(a) from t1 where a > 267 and (a between 273 AND 303);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 5 Using where
+create index idx on t1(a);
+select max(a) from t1 where a < 303 and (a between 267 AND 287);
+max(a)
+287
+explain select max(a) from t1 where a < 303 and (a between 267 AND 287);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Select tables optimized away
+select min(a) from t1 where a > 267 and (a between 273 AND 303);
+min(a)
+273
+explain select min(a) from t1 where a > 267 and (a between 273 AND 303);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Select tables optimized away
+drop table t1;
+#
+# End of 10.2 tests
#
#
# MDEV-9408 CREATE TABLE SELECT MAX(int_column) creates different columns for table vs view
@@ -2492,3 +2523,6 @@ t2 CREATE TABLE `t2` (
DROP TABLE t2;
DROP VIEW v1;
DROP TABLE t1;
+#
+# End of 10.3 tests
+#
diff --git a/mysql-test/main/func_group.test b/mysql-test/main/func_group.test
index bc2d6e9047d..a28b39c28f6 100644
--- a/mysql-test/main/func_group.test
+++ b/mysql-test/main/func_group.test
@@ -359,7 +359,7 @@ select min(t1.a1), min(t2.a4) from t1,t2 where t1.a1 < 'KKK' and t2.a4 < 'KKK';
explain
select min(a1) from t1 where a1 > 'KKK' or a1 < 'XXX';
explain
-select min(a1) from t1 where a1 != 'KKK';
+select min(a1) from t1 where (a1 < 'KKK' or a1 > 'KKK');
explain
select max(a3) from t1 where a2 < 2 and a3 < 'SEA';
explain
@@ -1705,7 +1705,33 @@ select count(*)+sleep(0) from t1;
drop table t1;
--echo #
---echo # Start of 10.3 tests
+--echo # MDEV-25112: MIN/MAX optimization for query containing BETWEEN in WHERE
+--echo #
+
+create table t1 (a int) engine=myisam;
+insert into t1 values (267), (273), (287), (303), (308);
+
+let $q1=
+select max(a) from t1 where a < 303 and (a between 267 AND 287);
+let $q2=
+select min(a) from t1 where a > 267 and (a between 273 AND 303);
+
+eval $q1;
+eval explain $q1;
+eval $q2;
+eval explain $q2;
+
+create index idx on t1(a);
+
+eval $q1;
+eval explain $q1;
+eval $q2;
+eval explain $q2;
+
+drop table t1;
+
+--echo #
+--echo # End of 10.2 tests
--echo #
--echo #
@@ -1730,3 +1756,7 @@ DROP TABLE t2;
DROP VIEW v1;
DROP TABLE t1;
+
+--echo #
+--echo # End of 10.3 tests
+--echo #
diff --git a/mysql-test/main/func_like.result b/mysql-test/main/func_like.result
index a937037167c..fc306c42067 100644
--- a/mysql-test/main/func_like.result
+++ b/mysql-test/main/func_like.result
@@ -289,6 +289,24 @@ a b c d
3 f_ 1 0 1
3 f\_ 0 1 0
drop table t1;
+create table t1 (f int);
+insert t1 values (1),(2);
+select 1 from (select distinct * from t1) as x where f < (select 1 like 2 escape (3=1));
+1
+drop table t1;
+create table t1(f1 int);
+insert into t1 values(1);
+update (select 1 like 2 escape (1 in (select 1 from t1))) x, t1 as d set d.f1 = 1;
+ERROR HY000: Incorrect arguments to ESCAPE
+select * from (select 1 like 2 escape (1 in (select 1 from t1))) x;
+1 like 2 escape (1 in (select 1 from t1))
+0
+drop table t1;
+create table t1 (f int);
+insert t1 values (1),(2);
+create view v1 as select * from t1 where (1 like 2 escape (3 in (('h', 'b') in (select 'k', 'k' union select 'g', 'j'))) and f >= 0);
+drop view v1;
+drop table t1;
#
# MDEV-17359 - Extend expression supported by like (| & << >> || + - * / DIV MOD ^ )
#
diff --git a/mysql-test/main/func_like.test b/mysql-test/main/func_like.test
index cb50fb91879..ef13d46c268 100644
--- a/mysql-test/main/func_like.test
+++ b/mysql-test/main/func_like.test
@@ -187,7 +187,7 @@ DROP TABLE t1;
--echo #
#
-# Item_func_line::print()
+# Item_func_like::print()
#
create view v1 as select 'foo!' like 'foo!!', 'foo!' like 'foo!!' escape '!';
show create view v1;
@@ -208,6 +208,33 @@ set sql_mode=default;
select * from t1;
drop table t1;
+#
+# Item_func_like::fix_fields()
+#
+create table t1 (f int);
+insert t1 values (1),(2);
+select 1 from (select distinct * from t1) as x where f < (select 1 like 2 escape (3=1));
+drop table t1;
+
+#
+# Item_func_like::fix_fields, ESCAPE, const_item()
+#
+create table t1(f1 int);
+insert into t1 values(1);
+--error ER_WRONG_ARGUMENTS
+update (select 1 like 2 escape (1 in (select 1 from t1))) x, t1 as d set d.f1 = 1;
+select * from (select 1 like 2 escape (1 in (select 1 from t1))) x;
+drop table t1;
+
+#
+# Item_func_like::walk
+#
+create table t1 (f int);
+insert t1 values (1),(2);
+create view v1 as select * from t1 where (1 like 2 escape (3 in (('h', 'b') in (select 'k', 'k' union select 'g', 'j'))) and f >= 0);
+drop view v1;
+drop table t1;
+
--echo #
--echo # MDEV-17359 - Extend expression supported by like (| & << >> || + - * / DIV MOD ^ )
--echo #
diff --git a/mysql-test/main/gis-json.result b/mysql-test/main/gis-json.result
index 1d6e2193fc9..e52a7c809c6 100644
--- a/mysql-test/main/gis-json.result
+++ b/mysql-test/main/gis-json.result
@@ -62,9 +62,9 @@ SELECT st_astext(st_geomfromgeojson('{ "type": "FeatureCollection", "features":
st_astext(st_geomfromgeojson('{ "type": "FeatureCollection", "features": [{ "type": "Feature", "geometry": { "type": "Point", "coordinates": [102.0, 0.5] }, "properties": { "prop0": "value0" } }]}'))
GEOMETRYCOLLECTION(POINT(102 0.5))
SELECT ST_AsText(ST_GeomFromGeoJSON('{ "type": "Point", "coordinates": [5.3, 15.0, 4.3]}',5));
-ERROR HY000: Incorrect option value: '5' for function ST_GeometryFromJSON
+ERROR HY000: Incorrect option value: '5' for function ST_GeomFromGeoJSON
SELECT ST_AsText(ST_GeomFromGeoJSON('{ "type": "Point", "coordinates": [5.3, 15.0, 4.3]}',1));
-ERROR 22023: Invalid GIS data provided to function ST_GeometryFromJSON.
+ERROR 22023: Invalid GIS data provided to function ST_GeomFromGeoJSON.
SELECT ST_AsText(ST_GeomFromGeoJSON('{ "type": "Point", "coordinates": [5.3, 15.0, 4.3]}',2));
ST_AsText(ST_GeomFromGeoJSON('{ "type": "Point", "coordinates": [5.3, 15.0, 4.3]}',2))
POINT(5.3 15)
@@ -104,6 +104,9 @@ a
NULL
Warnings:
Warning 4076 Incorrect GeoJSON format - empty 'coordinates' array.
+SELECT ST_GEOMFROMGEOJSON("{ \"type\": \"Feature\", \"geometry\": [10, 20] }");
+ST_GEOMFROMGEOJSON("{ \"type\": \"Feature\", \"geometry\": [10, 20] }")
+NULL
#
# End of 10.2 tests
#
diff --git a/mysql-test/main/gis-json.test b/mysql-test/main/gis-json.test
index b91ef235fd0..a97e9411e5c 100644
--- a/mysql-test/main/gis-json.test
+++ b/mysql-test/main/gis-json.test
@@ -44,6 +44,8 @@ SELECT st_astext(st_geomfromgeojson('{"type": "MultiLineString","coordinates": [
SELECT st_astext(st_geomfromgeojson('{"type": "Polygon","coordinates": []}')) as a;
SELECT st_astext(st_geomfromgeojson('{"type": "MultiPolygon","coordinates": []}')) as a;
+SELECT ST_GEOMFROMGEOJSON("{ \"type\": \"Feature\", \"geometry\": [10, 20] }");
+
--echo #
--echo # End of 10.2 tests
--echo #
diff --git a/mysql-test/main/gis-precise.result b/mysql-test/main/gis-precise.result
index 4e4161c34ec..513d8b6e8c2 100644
--- a/mysql-test/main/gis-precise.result
+++ b/mysql-test/main/gis-precise.result
@@ -806,3 +806,114 @@ SRID(GEOMETRYFROMTEXT(' MULTIPOINT(8 4,5 0,7 8,6 9,3 4,7 3,5 5) '))));
ASTEXT(ST_BUFFER(POLYGONFROMTEXT(' POLYGON((9 9,5 2,4 5,9 9))'),
SRID(GEOMETRYFROMTEXT(' MULTIPOINT(8 4,5 0,7 8,6 9,3 4,7 3,5 5) '))))
POLYGON((9 9,5 2,4 5,9 9))
+#
+# MDEV-13467 Feature request: Support for ST_Distance_Sphere()
+#
+SELECT ST_DISTANCE_SPHERE();
+ERROR 42000: Incorrect parameter count in the call to native function 'ST_DISTANCE_SPHERE'
+SELECT ST_DISTANCE_SPHERE(NULL);
+ERROR 42000: Incorrect parameter count in the call to native function 'ST_DISTANCE_SPHERE'
+SELECT ST_DISTANCE_SPHERE(NULL, NULL);
+ST_DISTANCE_SPHERE(NULL, NULL)
+NULL
+SELECT ST_DISTANCE_SPHERE(NULL, NULL, 3);
+ST_DISTANCE_SPHERE(NULL, NULL, 3)
+NULL
+SELECT ST_DISTANCE_SPHERE(NULL, 1, 3);
+ST_DISTANCE_SPHERE(NULL, 1, 3)
+NULL
+SELECT ST_DISTANCE_SPHERE(1, NULL, 3);
+ST_DISTANCE_SPHERE(1, NULL, 3)
+NULL
+SELECT ST_DISTANCE_SPHERE(1, 1);
+ERROR 22023: Invalid GIS data provided to function ST_Distance_Sphere.
+SELECT ST_DISTANCE_SPHERE(1, 1, 3);
+ERROR 22023: Invalid GIS data provided to function ST_Distance_Sphere.
+SELECT ST_DISTANCE_SPHERE(1, 1, NULL);
+ST_DISTANCE_SPHERE(1, 1, NULL)
+NULL
+SELECT ST_DISTANCE_SPHERE(ST_GEOMFROMTEXT('POINT(1 0)'), ST_GEOMFROMTEXT('LINESTRING(0 0, 1 1)'));
+ERROR HY000: Internal error: st_distance_sphere
+# Test Points and radius
+SELECT ST_DISTANCE_SPHERE(ST_GEOMFROMTEXT('POINT(0 0)'), ST_GEOMFROMTEXT('POINT(1 1)'));
+ST_DISTANCE_SPHERE(ST_GEOMFROMTEXT('POINT(0 0)'), ST_GEOMFROMTEXT('POINT(1 1)'))
+157249.0357231545
+SELECT TRUNCATE(ST_DISTANCE_SPHERE(ST_GEOMFROMTEXT('POINT(-1 -1)'), ST_GEOMFROMTEXT('POINT(-2 -2)')), 10);
+TRUNCATE(ST_DISTANCE_SPHERE(ST_GEOMFROMTEXT('POINT(-1 -1)'), ST_GEOMFROMTEXT('POINT(-2 -2)')), 10)
+157225.0865419108
+SELECT ST_DISTANCE_SPHERE(ST_GEOMFROMTEXT('POINT(0 0)'), ST_GEOMFROMTEXT('POINT(1 1)'), 1);
+ST_DISTANCE_SPHERE(ST_GEOMFROMTEXT('POINT(0 0)'), ST_GEOMFROMTEXT('POINT(1 1)'), 1)
+0.024682056391766436
+SELECT ST_DISTANCE_SPHERE(ST_GEOMFROMTEXT('POINT(0 0)'), ST_GEOMFROMTEXT('POINT(1 1)'), 0);
+ERROR HY000: Internal error: Radius must be greater than zero.
+SELECT ST_DISTANCE_SPHERE(ST_GEOMFROMTEXT('POINT(0 0)'), ST_GEOMFROMTEXT('POINT(1 1)'), -1);
+ERROR HY000: Internal error: Radius must be greater than zero.
+# Test longitude/latitude
+SELECT TRUNCATE(ST_DISTANCE_SPHERE(ST_GEOMFROMTEXT('POINT(0 1)'), ST_GEOMFROMTEXT('POINT(1 2)')), 10);
+TRUNCATE(ST_DISTANCE_SPHERE(ST_GEOMFROMTEXT('POINT(0 1)'), ST_GEOMFROMTEXT('POINT(1 2)')), 10)
+157225.0865419108
+SELECT TRUNCATE(ST_DISTANCE_SPHERE(ST_GEOMFROMTEXT('POINT(0 1)'), ST_GEOMFROMTEXT('POINT(2 1)')), 10);
+TRUNCATE(ST_DISTANCE_SPHERE(ST_GEOMFROMTEXT('POINT(0 1)'), ST_GEOMFROMTEXT('POINT(2 1)')), 10)
+222355.4901806686
+SELECT TRUNCATE(ST_DISTANCE_SPHERE(ST_GEOMFROMTEXT('POINT(1 0)'), ST_GEOMFROMTEXT('POINT(1 2)')), 10);
+TRUNCATE(ST_DISTANCE_SPHERE(ST_GEOMFROMTEXT('POINT(1 0)'), ST_GEOMFROMTEXT('POINT(1 2)')), 10)
+222389.3645969269
+SELECT ST_DISTANCE_SPHERE(ST_GEOMFROMTEXT('POINT(1 0)'), ST_GEOMFROMTEXT('POINT(2 1)'));
+ST_DISTANCE_SPHERE(ST_GEOMFROMTEXT('POINT(1 0)'), ST_GEOMFROMTEXT('POINT(2 1)'))
+157249.0357231545
+# Test Points - Multipoints
+SELECT ST_DISTANCE_SPHERE(ST_GEOMFROMTEXT('POINT(0 0)'), ST_GEOMFROMTEXT('MULTIPOINT(1 1)'));
+ST_DISTANCE_SPHERE(ST_GEOMFROMTEXT('POINT(0 0)'), ST_GEOMFROMTEXT('MULTIPOINT(1 1)'))
+157249.0357231545
+SELECT ST_DISTANCE_SPHERE(ST_GEOMFROMTEXT('MULTIPOINT(1 1)'), ST_GEOMFROMTEXT('POINT(0 0)'));
+ST_DISTANCE_SPHERE(ST_GEOMFROMTEXT('MULTIPOINT(1 1)'), ST_GEOMFROMTEXT('POINT(0 0)'))
+157249.0357231545
+SELECT ST_DISTANCE_SPHERE(ST_GEOMFROMTEXT('POINT(0 0)'), ST_GEOMFROMTEXT('MULTIPOINT(1 1,2 2)'));
+ST_DISTANCE_SPHERE(ST_GEOMFROMTEXT('POINT(0 0)'), ST_GEOMFROMTEXT('MULTIPOINT(1 1,2 2)'))
+157249.0357231545
+SELECT ST_DISTANCE_SPHERE(ST_GEOMFROMTEXT('POINT(0 0)'), ST_GEOMFROMTEXT('MULTIPOINT(2 2,1 1)'));
+ST_DISTANCE_SPHERE(ST_GEOMFROMTEXT('POINT(0 0)'), ST_GEOMFROMTEXT('MULTIPOINT(2 2,1 1)'))
+157249.0357231545
+SELECT ST_DISTANCE_SPHERE(ST_GEOMFROMTEXT('POINT(0 0)'), ST_GEOMFROMTEXT('MULTIPOINT(1 1,2 2)'), 1);
+ST_DISTANCE_SPHERE(ST_GEOMFROMTEXT('POINT(0 0)'), ST_GEOMFROMTEXT('MULTIPOINT(1 1,2 2)'), 1)
+0.024682056391766436
+SELECT ST_DISTANCE_SPHERE(ST_GEOMFROMTEXT('POINT(0 0)'), ST_GEOMFROMTEXT('MULTIPOINT(2 2,1 1)'), 1);
+ST_DISTANCE_SPHERE(ST_GEOMFROMTEXT('POINT(0 0)'), ST_GEOMFROMTEXT('MULTIPOINT(2 2,1 1)'), 1)
+0.024682056391766436
+SELECT ST_DISTANCE_SPHERE(ST_GEOMFROMTEXT('POINT(0 0)'), ST_GEOMFROMTEXT('MULTIPOINT(2 2, 1 1, 3 4)'), 1);
+ST_DISTANCE_SPHERE(ST_GEOMFROMTEXT('POINT(0 0)'), ST_GEOMFROMTEXT('MULTIPOINT(2 2, 1 1, 3 4)'), 1)
+0.024682056391766436
+SELECT ST_DISTANCE_SPHERE(ST_GEOMFROMTEXT('POINT(0 0)'), ST_GEOMFROMTEXT('MULTIPOINT(2 2, 1 1,5 6)'), 1);
+ST_DISTANCE_SPHERE(ST_GEOMFROMTEXT('POINT(0 0)'), ST_GEOMFROMTEXT('MULTIPOINT(2 2, 1 1,5 6)'), 1)
+0.024682056391766436
+# Test Multipoints - Multipoints
+SELECT ST_DISTANCE_SPHERE(ST_GEOMFROMTEXT('MULTIPOINT(3 4,8 9 )'), ST_GEOMFROMTEXT('MULTIPOINT(3 4,8 9 )'));
+ST_DISTANCE_SPHERE(ST_GEOMFROMTEXT('MULTIPOINT(3 4,8 9 )'), ST_GEOMFROMTEXT('MULTIPOINT(3 4,8 9 )'))
+0
+SELECT TRUNCATE(ST_DISTANCE_SPHERE(ST_GEOMFROMTEXT('MULTIPOINT(1 2,1 1 )'), ST_GEOMFROMTEXT('MULTIPOINT(3 4,8 9 )')), 10);
+TRUNCATE(ST_DISTANCE_SPHERE(ST_GEOMFROMTEXT('MULTIPOINT(1 2,1 1 )'), ST_GEOMFROMTEXT('MULTIPOINT(3 4,8 9 )')), 10)
+314282.5644496733
+SELECT TRUNCATE(ST_DISTANCE_SPHERE(ST_GEOMFROMTEXT('MULTIPOINT(1 2,1 1 )'), ST_GEOMFROMTEXT('MULTIPOINT(8 9,3 4 )')), 10);
+TRUNCATE(ST_DISTANCE_SPHERE(ST_GEOMFROMTEXT('MULTIPOINT(1 2,1 1 )'), ST_GEOMFROMTEXT('MULTIPOINT(8 9,3 4 )')), 10)
+314282.5644496733
+SELECT TRUNCATE(ST_DISTANCE_SPHERE(ST_GEOMFROMTEXT('MULTIPOINT(1 2,1 1 )'), ST_GEOMFROMTEXT('MULTIPOINT(8 9,3 4 )'),1), 17);
+TRUNCATE(ST_DISTANCE_SPHERE(ST_GEOMFROMTEXT('MULTIPOINT(1 2,1 1 )'), ST_GEOMFROMTEXT('MULTIPOINT(8 9,3 4 )'),1), 17)
+0.04933028646581131
+SELECT ST_DISTANCE_SPHERE(ST_GEOMFROMTEXT('MULTIPOINT(1 2,1 1 )'), ST_GEOMFROMTEXT('MULTIPOINT(8 9,3 4 )'),0);
+ERROR HY000: Internal error: Radius must be greater than zero.
+set @pt1 = ST_GeomFromText('POINT(190 -30)');
+set @pt2 = ST_GeomFromText('POINT(-30 50)');
+SELECT ST_Distance_Sphere(@pt1, @pt2);
+ERROR HY000: Out of range error: Longitude should be [-180,180] in function ST_Distance_Sphere.
+set @pt1 = ST_GeomFromText('POINT(135 -30)');
+set @pt2 = ST_GeomFromText('POINT(-30 91)');
+SELECT ST_Distance_Sphere(@pt1, @pt2);
+ERROR HY000: Out of range error: Latitude should be [-90,90] in function ST_Distance_Sphere.
+set @zenica = ST_GeomFromText('POINT(17.907743 44.203438)');
+set @sarajevo = ST_GeomFromText('POINT(18.413076 43.856258)');
+SELECT TRUNCATE(ST_Distance_Sphere(@zenica, @sarajevo), 10);
+TRUNCATE(ST_Distance_Sphere(@zenica, @sarajevo), 10)
+55878.5933759170
+SELECT TRUNCATE(ST_Distance_Sphere(@sarajevo, @zenica), 10);
+TRUNCATE(ST_Distance_Sphere(@sarajevo, @zenica), 10)
+55878.5933759170
diff --git a/mysql-test/main/gis-precise.test b/mysql-test/main/gis-precise.test
index 24f4ac9113e..da72a0c4d8a 100644
--- a/mysql-test/main/gis-precise.test
+++ b/mysql-test/main/gis-precise.test
@@ -394,3 +394,81 @@ with cte1 as( select (st_symdifference(point(1,1),point(1,1))) as a1 ), cte2 a
--source include/gis_debug.inc
+
+--echo #
+--echo # MDEV-13467 Feature request: Support for ST_Distance_Sphere()
+--echo #
+
+--error ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT
+SELECT ST_DISTANCE_SPHERE();
+--error ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT
+SELECT ST_DISTANCE_SPHERE(NULL);
+SELECT ST_DISTANCE_SPHERE(NULL, NULL);
+# NULL args and optional radius will return NULL
+SELECT ST_DISTANCE_SPHERE(NULL, NULL, 3);
+# At least 1 NULL arg and optional radius will return NULL
+SELECT ST_DISTANCE_SPHERE(NULL, 1, 3);
+# At least 1 NULL arg and optional radius will return NULL
+SELECT ST_DISTANCE_SPHERE(1, NULL, 3);
+# Return ER_GIS_INVALID_DATA for invalid geometry
+--error ER_GIS_INVALID_DATA
+SELECT ST_DISTANCE_SPHERE(1, 1);
+--error ER_GIS_INVALID_DATA
+SELECT ST_DISTANCE_SPHERE(1, 1, 3);
+# Return NULL if radius is NULL
+SELECT ST_DISTANCE_SPHERE(1, 1, NULL);
+# Wrong geometry
+--error ER_INTERNAL_ERROR
+SELECT ST_DISTANCE_SPHERE(ST_GEOMFROMTEXT('POINT(1 0)'), ST_GEOMFROMTEXT('LINESTRING(0 0, 1 1)'));
+
+--echo # Test Points and radius
+SELECT ST_DISTANCE_SPHERE(ST_GEOMFROMTEXT('POINT(0 0)'), ST_GEOMFROMTEXT('POINT(1 1)'));
+# make bb x86 happy
+SELECT TRUNCATE(ST_DISTANCE_SPHERE(ST_GEOMFROMTEXT('POINT(-1 -1)'), ST_GEOMFROMTEXT('POINT(-2 -2)')), 10);
+SELECT ST_DISTANCE_SPHERE(ST_GEOMFROMTEXT('POINT(0 0)'), ST_GEOMFROMTEXT('POINT(1 1)'), 1);
+--error ER_INTERNAL_ERROR
+SELECT ST_DISTANCE_SPHERE(ST_GEOMFROMTEXT('POINT(0 0)'), ST_GEOMFROMTEXT('POINT(1 1)'), 0);
+--error ER_INTERNAL_ERROR
+SELECT ST_DISTANCE_SPHERE(ST_GEOMFROMTEXT('POINT(0 0)'), ST_GEOMFROMTEXT('POINT(1 1)'), -1);
+--echo # Test longitude/latitude
+# make bb x86 happy
+SELECT TRUNCATE(ST_DISTANCE_SPHERE(ST_GEOMFROMTEXT('POINT(0 1)'), ST_GEOMFROMTEXT('POINT(1 2)')), 10);
+SELECT TRUNCATE(ST_DISTANCE_SPHERE(ST_GEOMFROMTEXT('POINT(0 1)'), ST_GEOMFROMTEXT('POINT(2 1)')), 10);
+SELECT TRUNCATE(ST_DISTANCE_SPHERE(ST_GEOMFROMTEXT('POINT(1 0)'), ST_GEOMFROMTEXT('POINT(1 2)')), 10);
+SELECT ST_DISTANCE_SPHERE(ST_GEOMFROMTEXT('POINT(1 0)'), ST_GEOMFROMTEXT('POINT(2 1)'));
+--echo # Test Points - Multipoints
+SELECT ST_DISTANCE_SPHERE(ST_GEOMFROMTEXT('POINT(0 0)'), ST_GEOMFROMTEXT('MULTIPOINT(1 1)'));
+SELECT ST_DISTANCE_SPHERE(ST_GEOMFROMTEXT('MULTIPOINT(1 1)'), ST_GEOMFROMTEXT('POINT(0 0)'));
+SELECT ST_DISTANCE_SPHERE(ST_GEOMFROMTEXT('POINT(0 0)'), ST_GEOMFROMTEXT('MULTIPOINT(1 1,2 2)'));
+SELECT ST_DISTANCE_SPHERE(ST_GEOMFROMTEXT('POINT(0 0)'), ST_GEOMFROMTEXT('MULTIPOINT(2 2,1 1)'));
+SELECT ST_DISTANCE_SPHERE(ST_GEOMFROMTEXT('POINT(0 0)'), ST_GEOMFROMTEXT('MULTIPOINT(1 1,2 2)'), 1);
+SELECT ST_DISTANCE_SPHERE(ST_GEOMFROMTEXT('POINT(0 0)'), ST_GEOMFROMTEXT('MULTIPOINT(2 2,1 1)'), 1);
+SELECT ST_DISTANCE_SPHERE(ST_GEOMFROMTEXT('POINT(0 0)'), ST_GEOMFROMTEXT('MULTIPOINT(2 2, 1 1, 3 4)'), 1);
+SELECT ST_DISTANCE_SPHERE(ST_GEOMFROMTEXT('POINT(0 0)'), ST_GEOMFROMTEXT('MULTIPOINT(2 2, 1 1,5 6)'), 1);
+--echo # Test Multipoints - Multipoints
+SELECT ST_DISTANCE_SPHERE(ST_GEOMFROMTEXT('MULTIPOINT(3 4,8 9 )'), ST_GEOMFROMTEXT('MULTIPOINT(3 4,8 9 )'));
+# make bb x86 happy
+SELECT TRUNCATE(ST_DISTANCE_SPHERE(ST_GEOMFROMTEXT('MULTIPOINT(1 2,1 1 )'), ST_GEOMFROMTEXT('MULTIPOINT(3 4,8 9 )')), 10);
+SELECT TRUNCATE(ST_DISTANCE_SPHERE(ST_GEOMFROMTEXT('MULTIPOINT(1 2,1 1 )'), ST_GEOMFROMTEXT('MULTIPOINT(8 9,3 4 )')), 10);
+# make bb x86 happy
+SELECT TRUNCATE(ST_DISTANCE_SPHERE(ST_GEOMFROMTEXT('MULTIPOINT(1 2,1 1 )'), ST_GEOMFROMTEXT('MULTIPOINT(8 9,3 4 )'),1), 17);
+--error ER_INTERNAL_ERROR
+SELECT ST_DISTANCE_SPHERE(ST_GEOMFROMTEXT('MULTIPOINT(1 2,1 1 )'), ST_GEOMFROMTEXT('MULTIPOINT(8 9,3 4 )'),0);
+
+# Longitude out of range [-180,180]
+set @pt1 = ST_GeomFromText('POINT(190 -30)');
+set @pt2 = ST_GeomFromText('POINT(-30 50)');
+--error ER_STD_OUT_OF_RANGE_ERROR
+SELECT ST_Distance_Sphere(@pt1, @pt2);
+
+# Latitude out of range [-90, 90]
+set @pt1 = ST_GeomFromText('POINT(135 -30)');
+set @pt2 = ST_GeomFromText('POINT(-30 91)');
+--error ER_STD_OUT_OF_RANGE_ERROR
+SELECT ST_Distance_Sphere(@pt1, @pt2);
+
+# POINT in form (longitude[-180, 180] latitude[-90, 90])
+set @zenica = ST_GeomFromText('POINT(17.907743 44.203438)');
+set @sarajevo = ST_GeomFromText('POINT(18.413076 43.856258)');
+SELECT TRUNCATE(ST_Distance_Sphere(@zenica, @sarajevo), 10);
+SELECT TRUNCATE(ST_Distance_Sphere(@sarajevo, @zenica), 10);
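
A minimal sketch, not taken from the patch: the POINT(0 0)/POINT(1 1) results above (about 157249.04 with the default radius versus about 0.02468 with radius 1) let one back out the default sphere radius the function appears to use.

# Illustrative only: judging by the figures above, the ratio should come out to roughly 6370986 (metres).
SELECT ST_DISTANCE_SPHERE(ST_GEOMFROMTEXT('POINT(0 0)'), ST_GEOMFROMTEXT('POINT(1 1)'))
     / ST_DISTANCE_SPHERE(ST_GEOMFROMTEXT('POINT(0 0)'), ST_GEOMFROMTEXT('POINT(1 1)'), 1)
       AS implied_default_radius;
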
diff --git a/mysql-test/main/group_by.result b/mysql-test/main/group_by.result
index ecd115c2dd0..db75287c61c 100644
--- a/mysql-test/main/group_by.result
+++ b/mysql-test/main/group_by.result
@@ -2884,6 +2884,52 @@ GROUP BY t.table_name;
ERROR HY001: Out of sort memory, consider increasing server sort buffer size
SET max_sort_length= @save_max_sort_length;
#
+# MDEV-23826: ORDER BY in view definition leads to wrong result with GROUP BY on query using view
+#
+CREATE TABLE t1
+(
+id INT PRIMARY KEY AUTO_INCREMENT,
+dt datetime,
+INDEX(dt),
+foo int
+);
+INSERT INTO t1 VALUES (1,'2020-09-26 12:00:00',1);
+INSERT INTO t1 VALUES (2,'2020-09-26 13:00:00',1);
+INSERT INTO t1 VALUES (3,'2020-09-27 13:00:00',1);
+INSERT INTO t1 VALUES (4,'2020-09-27 12:00:00',1);
+INSERT INTO t1 VALUES (5,'2020-09-28 12:00:00',1);
+INSERT INTO t1 VALUES (6,'2020-09-28 13:00:00',1);
+INSERT INTO t1 VALUES (7,'2020-09-25 12:00:00',1);
+INSERT INTO t1 VALUES (8,'2020-09-25 13:00:00',1);
+INSERT INTO t1 VALUES (9,'2020-09-26 13:00:00',1);
+CREATE VIEW v1 AS SELECT * FROM t1;
+CREATE VIEW v2 AS SELECT * FROM t1 ORDER BY dt;
+SELECT dt, sum(foo) AS foo FROM v1 WHERE dt>DATE_SUB('2020-09-27 00:00:00', INTERVAL 3 DAY) GROUP BY dt;
+dt foo
+2020-09-25 12:00:00 1
+2020-09-25 13:00:00 1
+2020-09-26 12:00:00 1
+2020-09-26 13:00:00 2
+2020-09-27 12:00:00 1
+2020-09-27 13:00:00 1
+2020-09-28 12:00:00 1
+2020-09-28 13:00:00 1
+SELECT dt, sum(foo) AS foo FROM v2 WHERE dt>DATE_SUB('2020-09-27 00:00:00', INTERVAL 3 DAY) GROUP BY dt;
+dt foo
+2020-09-25 12:00:00 1
+2020-09-25 13:00:00 1
+2020-09-26 12:00:00 1
+2020-09-26 13:00:00 2
+2020-09-27 12:00:00 1
+2020-09-27 13:00:00 1
+2020-09-28 12:00:00 1
+2020-09-28 13:00:00 1
+DROP TABLE t1;
+DROP VIEW v1,v2;
+#
+# End of 10.2 tests
+#
+#
# MDEV-16170
# Server crashes in Item_null_result::type_handler on SELECT with ROLLUP
#
@@ -2894,4 +2940,35 @@ f COUNT(*)
1 1
NULL 1
DROP TABLE t1;
+#
+# MDEV-24710 Uninitialized value upon CREATE .. SELECT ... VALUE
+#
+CREATE TABLE t1 (a VARCHAR(8) NOT NULL DEFAULT '');
+INSERT INTO t1 (a) VALUES ('foo');
+CREATE TABLE t2 AS SELECT MAX(a) AS f1, a AS f2 FROM t1 WHERE VALUE(a) IS NOT NULL;
+SELECT * from t2;
+f1 f2
+NULL NULL
+SELECT MAX(a) AS f1, a AS f2 FROM t1 WHERE VALUE(a) IS NOT NULL;
+f1 f2
+NULL NULL
+SELECT MAX(a) AS f1, a AS f2 FROM t1 WHERE 1=0;
+f1 f2
+NULL NULL
+drop table t1,t2;
+# Extra test to check the fix for MDEV-24710
+create table t20 (pk int primary key, a int);
+insert into t20 values (1,1);
+create table t21 (pk int primary key, b int not null);
+insert into t21 values (1,1);
+create table t22 (a int);
+insert into t22 values (1),(2);
+select a, (select max(t21.b) from t20 left join t21 on t21.pk=t20.a+10
+where t20.pk=1 and rand(123) < 0.5) as SUBQ from t22;
+a SUBQ
+1 NULL
+2 NULL
+drop table t20, t21, t22;
+#
# End of 10.3 tests
+#
diff --git a/mysql-test/main/group_by.test b/mysql-test/main/group_by.test
index 3dc0ee9f00b..3ca518420a0 100644
--- a/mysql-test/main/group_by.test
+++ b/mysql-test/main/group_by.test
@@ -1987,7 +1987,6 @@ drop table t1;
--echo # GROUP BY leads to crash
--echo #
-
CALL mtr.add_suppression("Out of sort memory");
CALL mtr.add_suppression("Sort aborted");
SET @save_max_sort_length= @@max_sort_length;
@@ -2000,6 +1999,40 @@ GROUP BY t.table_name;
SET max_sort_length= @save_max_sort_length;
--echo #
+--echo # MDEV-23826: ORDER BY in view definition leads to wrong result with GROUP BY on query using view
+--echo #
+
+CREATE TABLE t1
+(
+ id INT PRIMARY KEY AUTO_INCREMENT,
+ dt datetime,
+ INDEX(dt),
+ foo int
+);
+
+INSERT INTO t1 VALUES (1,'2020-09-26 12:00:00',1);
+INSERT INTO t1 VALUES (2,'2020-09-26 13:00:00',1);
+INSERT INTO t1 VALUES (3,'2020-09-27 13:00:00',1);
+INSERT INTO t1 VALUES (4,'2020-09-27 12:00:00',1);
+INSERT INTO t1 VALUES (5,'2020-09-28 12:00:00',1);
+INSERT INTO t1 VALUES (6,'2020-09-28 13:00:00',1);
+INSERT INTO t1 VALUES (7,'2020-09-25 12:00:00',1);
+INSERT INTO t1 VALUES (8,'2020-09-25 13:00:00',1);
+INSERT INTO t1 VALUES (9,'2020-09-26 13:00:00',1);
+
+CREATE VIEW v1 AS SELECT * FROM t1;
+CREATE VIEW v2 AS SELECT * FROM t1 ORDER BY dt;
+SELECT dt, sum(foo) AS foo FROM v1 WHERE dt>DATE_SUB('2020-09-27 00:00:00', INTERVAL 3 DAY) GROUP BY dt;
+SELECT dt, sum(foo) AS foo FROM v2 WHERE dt>DATE_SUB('2020-09-27 00:00:00', INTERVAL 3 DAY) GROUP BY dt;
+
+DROP TABLE t1;
+DROP VIEW v1,v2;
+
+--echo #
+--echo # End of 10.2 tests
+--echo #
+
+--echo #
--echo # MDEV-16170
--echo # Server crashes in Item_null_result::type_handler on SELECT with ROLLUP
--echo #
@@ -2009,4 +2042,29 @@ INSERT INTO t1 VALUES ('2032-10-08');
SELECT d != '2023-03-04' AS f, COUNT(*) FROM t1 GROUP BY d WITH ROLLUP;
DROP TABLE t1;
+--echo #
+--echo # MDEV-24710 Uninitialized value upon CREATE .. SELECT ... VALUE
+--echo #
+
+CREATE TABLE t1 (a VARCHAR(8) NOT NULL DEFAULT '');
+INSERT INTO t1 (a) VALUES ('foo');
+CREATE TABLE t2 AS SELECT MAX(a) AS f1, a AS f2 FROM t1 WHERE VALUE(a) IS NOT NULL;
+SELECT * from t2;
+SELECT MAX(a) AS f1, a AS f2 FROM t1 WHERE VALUE(a) IS NOT NULL;
+SELECT MAX(a) AS f1, a AS f2 FROM t1 WHERE 1=0;
+drop table t1,t2;
+
+--echo # Extra test to check the fix for MDEV-24710
+
+create table t20 (pk int primary key, a int);
+insert into t20 values (1,1);create table t21 (pk int primary key, b int not null);
+insert into t21 values (1,1);
+create table t22 (a int);
+insert into t22 values (1),(2);
+select a, (select max(t21.b) from t20 left join t21 on t21.pk=t20.a+10
+ where t20.pk=1 and rand(123) < 0.5) as SUBQ from t22;
+drop table t20, t21, t22;
+
+--echo #
--echo # End of 10.3 tests
+--echo #
diff --git a/mysql-test/main/group_min_max.result b/mysql-test/main/group_min_max.result
index a28cc418207..8d240f9f36d 100644
--- a/mysql-test/main/group_min_max.result
+++ b/mysql-test/main/group_min_max.result
@@ -2940,7 +2940,7 @@ NULL
EXPLAIN
SELECT MIN( a ) FROM t1 WHERE a <> NULL;
id select_type table type possible_keys key key_len ref rows Extra
-x x x x x x x x x Impossible WHERE noticed after reading const tables
+x x x x x x x x x Using where; Using index
SELECT MIN( a ) FROM t1 WHERE a <> NULL;
MIN( a )
NULL
diff --git a/mysql-test/main/having.result b/mysql-test/main/having.result
index 703f013c2da..51b88c5b8d2 100644
--- a/mysql-test/main/having.result
+++ b/mysql-test/main/having.result
@@ -847,6 +847,39 @@ t r
DROP TABLE t1;
DROP FUNCTION next_seq_value;
DROP TABLE series;
+#
+# MDEV-24958 Server crashes in my_strtod /
+# Value_source::Converter_strntod::Converter_strntod with DEFAULT(blob)
+#
+# MDEV-24942 Server crashes in _ma_rec_pack / _ma_write_blob_record with
+# DEFAULT() on BLOB
+#
+CREATE TABLE t1 (id INT, f MEDIUMTEXT NOT NULL DEFAULT 'A');
+INSERT INTO t1 VALUES (1,'foo'),(2,'bar');
+SELECT f FROM t1 GROUP BY id ORDER BY DEFAULT(f);
+f
+foo
+bar
+SELECT DEFAULT(f) AS h FROM t1 HAVING h > 5;
+h
+Warnings:
+Warning 1292 Truncated incorrect DOUBLE value: 'A'
+SELECT DEFAULT(f) AS h FROM t1 HAVING h >= 0;
+h
+A
+A
+Warnings:
+Warning 1292 Truncated incorrect DOUBLE value: 'A'
+SELECT DEFAULT(f) AS h FROM t1 HAVING h >= 'A';
+h
+A
+A
+alter table t1 add column b int default (rand()+1+3);
+select default(b) AS h FROM t1 HAVING h > "2";
+h
+#
+#
+drop table t1;
# End of 10.3 tests
#
 # MDEV-18681: AND formula in HAVING with several occurrences
diff --git a/mysql-test/main/having.test b/mysql-test/main/having.test
index 072f1a088dc..7e0a0439f8e 100644
--- a/mysql-test/main/having.test
+++ b/mysql-test/main/having.test
@@ -891,6 +891,27 @@ DROP TABLE t1;
DROP FUNCTION next_seq_value;
DROP TABLE series;
+
+--echo #
+--echo # MDEV-24958 Server crashes in my_strtod /
+--echo # Value_source::Converter_strntod::Converter_strntod with DEFAULT(blob)
+--echo #
+--echo # MDEV-24942 Server crashes in _ma_rec_pack / _ma_write_blob_record with
+--echo # DEFAULT() on BLOB
+--echo #
+
+CREATE TABLE t1 (id INT, f MEDIUMTEXT NOT NULL DEFAULT 'A');
+INSERT INTO t1 VALUES (1,'foo'),(2,'bar');
+SELECT f FROM t1 GROUP BY id ORDER BY DEFAULT(f);
+SELECT DEFAULT(f) AS h FROM t1 HAVING h > 5;
+SELECT DEFAULT(f) AS h FROM t1 HAVING h >= 0;
+SELECT DEFAULT(f) AS h FROM t1 HAVING h >= 'A';
+
+alter table t1 add column b int default (rand()+1+3);
+--replace_column 1 #
+select default(b) AS h FROM t1 HAVING h > "2";
+drop table t1;
+
--echo # End of 10.3 tests
--echo #
diff --git a/mysql-test/main/index_merge_myisam.result b/mysql-test/main/index_merge_myisam.result
index 387bab6b408..55caf051720 100644
--- a/mysql-test/main/index_merge_myisam.result
+++ b/mysql-test/main/index_merge_myisam.result
@@ -1687,7 +1687,8 @@ INSERT INTO t1 VALUES
ALTER TABLE t1 ENABLE KEYS;
EXPLAIN
SELECT * FROM t1 FORCE KEY (PRIMARY , i , c1 , c2)
-WHERE pk = 255 OR i = 22 OR (pk IN (1 , 136) AND c2 IN ('c' , 'w') AND (c1 NOT BETWEEN 'e' AND 'i' OR c2 > 'g')) OR pk != 1 ;
+WHERE pk = 255 OR i = 22 OR (pk IN (1 , 136) AND c2 IN ('c' , 'w') AND (c1
+NOT BETWEEN 'e' AND 'i' OR c2 > 'g')) OR (pk is not null and (pk <1 or pk>1)) ;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index_merge PRIMARY,c1,i,c2 PRIMARY,i 0,5 NULL 69 Using sort_union(PRIMARY,i); Using where
DROP TABLE t1;
diff --git a/mysql-test/main/index_merge_myisam.test b/mysql-test/main/index_merge_myisam.test
index 0b0d8d60c15..b77c9bc1ca2 100644
--- a/mysql-test/main/index_merge_myisam.test
+++ b/mysql-test/main/index_merge_myisam.test
@@ -236,9 +236,11 @@ INSERT INTO t1 VALUES
ALTER TABLE t1 ENABLE KEYS;
+# note: (pk is not null and (pk <1 or pk>1)) below is a sargable form of pk!=1
EXPLAIN
SELECT * FROM t1 FORCE KEY (PRIMARY , i , c1 , c2)
-WHERE pk = 255 OR i = 22 OR (pk IN (1 , 136) AND c2 IN ('c' , 'w') AND (c1 NOT BETWEEN 'e' AND 'i' OR c2 > 'g')) OR pk != 1 ;
+WHERE pk = 255 OR i = 22 OR (pk IN (1 , 136) AND c2 IN ('c' , 'w') AND (c1
+NOT BETWEEN 'e' AND 'i' OR c2 > 'g')) OR (pk is not null and (pk <1 or pk>1)) ;
DROP TABLE t1;
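
A minimal sketch of the sargable rewrite noted in the comment above (not part of the patch; the table name demo_sarg is made up): for a NOT NULL key column the two predicate forms return the same rows, but only the disjunctive form gives the range optimizer usable endpoints.

create table demo_sarg (pk int primary key, filler char(8)) engine=myisam;
insert into demo_sarg values (1,'a'),(2,'b'),(3,'c');
select * from demo_sarg where pk != 2;                                 # not sargable as written
select * from demo_sarg where pk is not null and (pk < 2 or pk > 2);   # sargable equivalent
drop table demo_sarg;
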
diff --git a/mysql-test/main/information_schema.result b/mysql-test/main/information_schema.result
index 51dbad1d628..3534c4bd337 100644
--- a/mysql-test/main/information_schema.result
+++ b/mysql-test/main/information_schema.result
@@ -565,7 +565,7 @@ create view v2 (c) as select a from t1 WITH LOCAL CHECK OPTION;
create view v3 (c) as select a from t1 WITH CASCADED CHECK OPTION;
select * from information_schema.views;
TABLE_CATALOG TABLE_SCHEMA TABLE_NAME VIEW_DEFINITION CHECK_OPTION IS_UPDATABLE DEFINER SECURITY_TYPE CHARACTER_SET_CLIENT COLLATION_CONNECTION ALGORITHM
-def mysql user select `mysql`.`global_priv`.`Host` AS `Host`,`mysql`.`global_priv`.`User` AS `User`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.plugin') in ('mysql_native_password','mysql_old_password'),ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.authentication_string'),''),'') AS `Password`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 1,'Y','N') AS `Select_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 2,'Y','N') AS `Insert_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 4,'Y','N') AS `Update_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 8,'Y','N') AS `Delete_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 16,'Y','N') AS `Create_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 32,'Y','N') AS `Drop_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 64,'Y','N') AS `Reload_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 128,'Y','N') AS `Shutdown_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 256,'Y','N') AS `Process_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 512,'Y','N') AS `File_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 1024,'Y','N') AS `Grant_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 2048,'Y','N') AS `References_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 4096,'Y','N') AS `Index_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 8192,'Y','N') AS `Alter_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 16384,'Y','N') AS `Show_db_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 32768,'Y','N') AS `Super_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 65536,'Y','N') AS `Create_tmp_table_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 131072,'Y','N') AS `Lock_tables_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 262144,'Y','N') AS `Execute_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 524288,'Y','N') AS `Repl_slave_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 1048576,'Y','N') AS `Repl_client_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 2097152,'Y','N') AS `Create_view_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 4194304,'Y','N') AS `Show_view_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 8388608,'Y','N') AS `Create_routine_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 16777216,'Y','N') AS `Alter_routine_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 33554432,'Y','N') AS `Create_user_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 67108864,'Y','N') AS `Event_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 134217728,'Y','N') AS `Trigger_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 268435456,'Y','N') AS `Create_tablespace_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 536870912,'Y','N') AS `Delete_history_priv`,elt(ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.ssl_type'),0) + 1,'','ANY','X509','SPECIFIED') AS `ssl_type`,ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.ssl_cipher'),'') AS `ssl_cipher`,ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.x509_issuer'),'') AS `x509_issuer`,ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.x509_subject'),'') AS `x509_subject`,cast(ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.max_questions'),0) as unsigned) AS 
`max_questions`,cast(ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.max_updates'),0) as unsigned) AS `max_updates`,cast(ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.max_connections'),0) as unsigned) AS `max_connections`,cast(ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.max_user_connections'),0) as signed) AS `max_user_connections`,ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.plugin'),'') AS `plugin`,ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.authentication_string'),'') AS `authentication_string`,'N' AS `password_expired`,elt(ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.is_role'),0) + 1,'N','Y') AS `is_role`,ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.default_role'),'') AS `default_role`,cast(ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.max_statement_time'),0.0) as decimal(12,6)) AS `max_statement_time` from `mysql`.`global_priv` NONE YES mariadb.sys@localhost DEFINER latin1 latin1_swedish_ci UNDEFINED
+def mysql user select `mysql`.`global_priv`.`Host` AS `Host`,`mysql`.`global_priv`.`User` AS `User`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.plugin') in ('mysql_native_password','mysql_old_password'),ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.authentication_string'),''),'') AS `Password`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 1,'Y','N') AS `Select_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 2,'Y','N') AS `Insert_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 4,'Y','N') AS `Update_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 8,'Y','N') AS `Delete_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 16,'Y','N') AS `Create_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 32,'Y','N') AS `Drop_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 64,'Y','N') AS `Reload_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 128,'Y','N') AS `Shutdown_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 256,'Y','N') AS `Process_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 512,'Y','N') AS `File_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 1024,'Y','N') AS `Grant_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 2048,'Y','N') AS `References_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 4096,'Y','N') AS `Index_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 8192,'Y','N') AS `Alter_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 16384,'Y','N') AS `Show_db_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 32768,'Y','N') AS `Super_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 65536,'Y','N') AS `Create_tmp_table_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 131072,'Y','N') AS `Lock_tables_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 262144,'Y','N') AS `Execute_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 524288,'Y','N') AS `Repl_slave_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 1048576,'Y','N') AS `Repl_client_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 2097152,'Y','N') AS `Create_view_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 4194304,'Y','N') AS `Show_view_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 8388608,'Y','N') AS `Create_routine_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 16777216,'Y','N') AS `Alter_routine_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 33554432,'Y','N') AS `Create_user_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 67108864,'Y','N') AS `Event_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 134217728,'Y','N') AS `Trigger_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 268435456,'Y','N') AS `Create_tablespace_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 536870912,'Y','N') AS `Delete_history_priv`,elt(ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.ssl_type'),0) + 1,'','ANY','X509','SPECIFIED') AS `ssl_type`,ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.ssl_cipher'),'') AS `ssl_cipher`,ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.x509_issuer'),'') AS `x509_issuer`,ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.x509_subject'),'') AS `x509_subject`,cast(ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.max_questions'),0) as unsigned) AS 
`max_questions`,cast(ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.max_updates'),0) as unsigned) AS `max_updates`,cast(ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.max_connections'),0) as unsigned) AS `max_connections`,cast(ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.max_user_connections'),0) as signed) AS `max_user_connections`,ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.plugin'),'') AS `plugin`,ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.authentication_string'),'') AS `authentication_string`,if(ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.password_last_changed'),1) = 0,'Y','N') AS `password_expired`,elt(ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.is_role'),0) + 1,'N','Y') AS `is_role`,ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.default_role'),'') AS `default_role`,cast(ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.max_statement_time'),0.0) as decimal(12,6)) AS `max_statement_time` from `mysql`.`global_priv` NONE YES mariadb.sys@localhost DEFINER latin1 latin1_swedish_ci UNDEFINED
def test v1 select `test`.`t1`.`a` AS `c` from `test`.`t1` CASCADED YES root@localhost DEFINER latin1 latin1_swedish_ci UNDEFINED
def test v2 select `test`.`t1`.`a` AS `c` from `test`.`t1` LOCAL YES root@localhost DEFINER latin1 latin1_swedish_ci UNDEFINED
def test v3 select `test`.`t1`.`a` AS `c` from `test`.`t1` CASCADED YES root@localhost DEFINER latin1 latin1_swedish_ci UNDEFINED
@@ -863,6 +863,7 @@ where data_type = 'longtext' and table_schema != 'performance_schema'
order by binary table_name, ordinal_position;
table_schema table_name column_name
information_schema ALL_PLUGINS PLUGIN_DESCRIPTION
+information_schema CHECK_CONSTRAINTS CHECK_CLAUSE
information_schema COLUMNS COLUMN_DEFAULT
information_schema COLUMNS COLUMN_TYPE
information_schema COLUMNS GENERATION_EXPRESSION
@@ -2222,17 +2223,22 @@ SCHEMA_NAME
# MDEV-14836: Assertion `m_status == DA_ERROR' failed in
# Diagnostics_area::sql_errno upon query from I_S with LIMIT ROWS EXAMINED
#
-SELECT * FROM seq_1_to_100 LIMIT ROWS EXAMINED 10;
-seq
-1
-2
-3
-4
-5
-6
-7
+SELECT * FROM INFORMATION_SCHEMA.`COLUMNS` LIMIT ROWS EXAMINED 10;
+TABLE_CATALOG TABLE_SCHEMA TABLE_NAME COLUMN_NAME ORDINAL_POSITION COLUMN_DEFAULT IS_NULLABLE DATA_TYPE CHARACTER_MAXIMUM_LENGTH CHARACTER_OCTET_LENGTH NUMERIC_PRECISION NUMERIC_SCALE DATETIME_PRECISION CHARACTER_SET_NAME COLLATION_NAME COLUMN_TYPE COLUMN_KEY EXTRA PRIVILEGES COLUMN_COMMENT IS_GENERATED GENERATION_EXPRESSION
Warnings:
-Warning 1931 Query execution was interrupted. The query examined at least 11 rows, which exceeds LIMIT ROWS EXAMINED (10). The query result may be incomplete
+Warning 1931 Query execution was interrupted. The query examined at least ### rows, which exceeds LIMIT ROWS EXAMINED (10). The query result may be incomplete
+#
+# MDEV-24179: Assertion `m_status == DA_ERROR || m_status == DA_OK ||
+# m_status == DA_OK_BULK' failed in Diagnostics_area::message()
+#
+call mtr.add_suppression("Sort aborted.*");
+DROP DATABASE test;
+CREATE DATABASE test;
+USE test;
+CREATE VIEW v AS SELECT table_schema AS object_schema, table_name AS object_name, table_type AS object_type FROM information_schema.tables ORDER BY object_schema;
+SELECT * FROM v LIMIT ROWS EXAMINED 9;
+ERROR HY000: Sort aborted:
+DROP VIEW v;
#
# End of 10.2 Test
#
@@ -2310,5 +2316,34 @@ create table t2 (n int);
insert into t1 set n = (select table_rows from information_schema.tables where table_name='t2');
drop table t1, t2;
#
+# MDEV-24593 Signal 11 when group by primary key of table joined to information_schema.columns
+#
+create table t1 (f varchar(64) primary key);
+select f from information_schema.columns i
+inner join t1 on f=i.column_name
+group by f;
+f
+drop table t1;
+#
+# MDEV-24929 Server crash in thr_multi_unlock or in
+# get_schema_tables_result upon select from I_S with joins
+#
+CREATE TABLE t1 (a TIMESTAMP, KEY (a));
+INSERT INTO t1 VALUES ('2012-12-12'),('2021-11-11');
+SELECT count(*) FROM t1 AS t1a LEFT JOIN (t1 AS t1b JOIN INFORMATION_SCHEMA.ROUTINES) ON (t1b.a IS NULL);
+count(*)
+2
+SELECT count(*) FROM t1 AS t1a LEFT JOIN (t1 AS t1b JOIN INFORMATION_SCHEMA.PROFILING) ON (t1b.a IS NULL);
+count(*)
+2
+DROP TABLE t1;
+#
+# MDEV-24868 Server crashes in optimize_schema_tables_memory_usage after select from information_schema.innodb_sys_columns
+#
+create table t1 ( name varchar(64) character set utf8, len int);
+select * from t1 where (name, len) in (select name, len from information_schema.innodb_sys_columns having len = 8);
+name len
+drop table t1;
+#
# End of 10.3 tests
#
diff --git a/mysql-test/main/information_schema.test b/mysql-test/main/information_schema.test
index d9952538142..c3dbc1f6e6b 100644
--- a/mysql-test/main/information_schema.test
+++ b/mysql-test/main/information_schema.test
@@ -1931,7 +1931,25 @@ SELECT SCHEMA_NAME from information_schema.schemata where schema_name=REPEAT('a'
--echo # Diagnostics_area::sql_errno upon query from I_S with LIMIT ROWS EXAMINED
--echo #
-SELECT * FROM seq_1_to_100 LIMIT ROWS EXAMINED 10;
+replace_regex /at least \d+ rows/at least ### rows/;
+SELECT * FROM INFORMATION_SCHEMA.`COLUMNS` LIMIT ROWS EXAMINED 10;
+
+--echo #
+--echo # MDEV-24179: Assertion `m_status == DA_ERROR || m_status == DA_OK ||
+--echo # m_status == DA_OK_BULK' failed in Diagnostics_area::message()
+--echo #
+
+call mtr.add_suppression("Sort aborted.*");
+
+DROP DATABASE test;
+CREATE DATABASE test;
+USE test;
+CREATE VIEW v AS SELECT table_schema AS object_schema, table_name AS object_name, table_type AS object_type FROM information_schema.tables ORDER BY object_schema;
+
+--error ER_FILSORT_ABORT
+SELECT * FROM v LIMIT ROWS EXAMINED 9;
+
+DROP VIEW v;
--echo #
--echo # End of 10.2 Test
@@ -2007,6 +2025,34 @@ create table t2 (n int);
insert into t1 set n = (select table_rows from information_schema.tables where table_name='t2');
drop table t1, t2;
+
+--echo #
+--echo # MDEV-24593 Signal 11 when group by primary key of table joined to information_schema.columns
+--echo #
+create table t1 (f varchar(64) primary key);
+select f from information_schema.columns i
+inner join t1 on f=i.column_name
+group by f;
+drop table t1;
+
+--echo #
+--echo # MDEV-24929 Server crash in thr_multi_unlock or in
+--echo # get_schema_tables_result upon select from I_S with joins
+--echo #
+
+CREATE TABLE t1 (a TIMESTAMP, KEY (a));
+INSERT INTO t1 VALUES ('2012-12-12'),('2021-11-11');
+SELECT count(*) FROM t1 AS t1a LEFT JOIN (t1 AS t1b JOIN INFORMATION_SCHEMA.ROUTINES) ON (t1b.a IS NULL);
+SELECT count(*) FROM t1 AS t1a LEFT JOIN (t1 AS t1b JOIN INFORMATION_SCHEMA.PROFILING) ON (t1b.a IS NULL);
+DROP TABLE t1;
+
+--echo #
+--echo # MDEV-24868 Server crashes in optimize_schema_tables_memory_usage after select from information_schema.innodb_sys_columns
+--echo #
+create table t1 ( name varchar(64) character set utf8, len int);
+select * from t1 where (name, len) in (select name, len from information_schema.innodb_sys_columns having len = 8);
+drop table t1;
+
--echo #
--echo # End of 10.3 tests
--echo #
diff --git a/mysql-test/main/innodb_ext_key.result b/mysql-test/main/innodb_ext_key.result
index 7cc03bee546..6fba116d35d 100644
--- a/mysql-test/main/innodb_ext_key.result
+++ b/mysql-test/main/innodb_ext_key.result
@@ -795,6 +795,21 @@ EXPLAIN
}
drop table t1;
SET optimizer_use_condition_selectivity=@save_optimizer_use_condition_selectivity;
+#
+# MDEV-11172: EXPLAIN shows non-sensical value for key_len with type=index
+#
+CREATE TABLE t1(a INT);
+INSERT INTO t1 VALUES (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+CREATE TABLE t2 (
+pk VARCHAR(50),
+a VARCHAR(20),
+KEY k1(a),
+PRIMARY KEY(pk)
+)ENGINE=INNODB;
+INSERT INTO t2 SELECT a,a FROM t1;
+EXPLAIN SELECT pk FROM t2 FORCE INDEX(k1);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 index NULL k1 23 NULL # Using index
+DROP TABLE t1,t2;
set global innodb_stats_persistent= @innodb_stats_persistent_save;
-set global innodb_stats_persistent_sample_pages=
-@innodb_stats_persistent_sample_pages_save;
+set global innodb_stats_persistent_sample_pages= @innodb_stats_persistent_sample_pages_save;
diff --git a/mysql-test/main/innodb_ext_key.test b/mysql-test/main/innodb_ext_key.test
index 413d5570be5..1ff0ea51990 100644
--- a/mysql-test/main/innodb_ext_key.test
+++ b/mysql-test/main/innodb_ext_key.test
@@ -620,6 +620,26 @@ select * from t1 force index(k1) where f2 <= 5 and pk2 <=5 and pk1 = 'abc' and
drop table t1;
SET optimizer_use_condition_selectivity=@save_optimizer_use_condition_selectivity;
+
+--echo #
+--echo # MDEV-11172: EXPLAIN shows non-sensical value for key_len with type=index
+--echo #
+
+CREATE TABLE t1(a INT);
+INSERT INTO t1 VALUES (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+
+CREATE TABLE t2 (
+ pk VARCHAR(50),
+ a VARCHAR(20),
+ KEY k1(a),
+ PRIMARY KEY(pk)
+)ENGINE=INNODB;
+
+INSERT INTO t2 SELECT a,a FROM t1;
+--replace_column 9 #
+EXPLAIN SELECT pk FROM t2 FORCE INDEX(k1);
+
+DROP TABLE t1,t2;
+
set global innodb_stats_persistent= @innodb_stats_persistent_save;
-set global innodb_stats_persistent_sample_pages=
- @innodb_stats_persistent_sample_pages_save;
+set global innodb_stats_persistent_sample_pages= @innodb_stats_persistent_sample_pages_save;
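
For the key_len of 23 reported above for k1, a minimal arithmetic check (assuming the test runs with the default latin1 charset, so VARCHAR(20) needs at most 20 data bytes): 20 data bytes + 2 length-prefix bytes + 1 NULL-flag byte = 23; the point of MDEV-11172 is that the reported value no longer drags in the primary-key extension.

SELECT 20 + 2 + 1 AS expected_key_len;   # matches the key_len column shown in the EXPLAIN above
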
diff --git a/mysql-test/main/innodb_icp.result b/mysql-test/main/innodb_icp.result
index 949bc9a00d8..96ff1964ac1 100644
--- a/mysql-test/main/innodb_icp.result
+++ b/mysql-test/main/innodb_icp.result
@@ -455,11 +455,11 @@ c1 INT NOT NULL,
PRIMARY KEY (pk)
);
INSERT INTO t1 VALUES (1,9),(2,7),(3,6),(4,3),(5,1);
-EXPLAIN SELECT pk, c1 FROM t1 WHERE pk <> 3;
+EXPLAIN SELECT pk, c1 FROM t1 WHERE (pk<3 or pk>3);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 4 Using where
SET SESSION optimizer_switch='index_condition_pushdown=off';
-SELECT pk, c1 FROM t1 WHERE pk <> 3;
+SELECT pk, c1 FROM t1 WHERE (pk<3 or pk>3);
pk c1
1 9
2 7
@@ -687,23 +687,23 @@ INSERT INTO t2 VALUES
('Ill'), ('eckqzsflbzaffti'), ('w'), ('she'), ('gxbwypqtjzwywwer'), ('w');
SET SESSION optimizer_switch='index_condition_pushdown=off';
EXPLAIN
-SELECT t1.b, t1.c FROM t1, t2 WHERE t1.a = t2.a AND t1.b != 0
+SELECT t1.b, t1.c FROM t1, t2 WHERE t1.a = t2.a AND (t1.b<0 OR t1.b>0)
HAVING t1.c != 5 ORDER BY t1.c;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 1 Using where; Using filesort
1 SIMPLE t2 ref a a 515 test.t1.a 1 Using where
-SELECT t1.b, t1.c FROM t1, t2 WHERE t1.a = t2.a AND t1.b != 0
+SELECT t1.b, t1.c FROM t1, t2 WHERE t1.a = t2.a AND (t1.b<0 OR t1.b>0)
HAVING t1.c != 5 ORDER BY t1.c;
b c
1 4
SET SESSION optimizer_switch='index_condition_pushdown=on';
EXPLAIN
-SELECT t1.b, t1.c FROM t1, t2 WHERE t1.a = t2.a AND t1.b != 0
+SELECT t1.b, t1.c FROM t1, t2 WHERE t1.a = t2.a AND (t1.b<0 OR t1.b>0)
HAVING t1.c != 5 ORDER BY t1.c;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 1 Using where; Using filesort
1 SIMPLE t2 ref a a 515 test.t1.a 1 Using where
-SELECT t1.b, t1.c FROM t1, t2 WHERE t1.a = t2.a AND t1.b != 0
+SELECT t1.b, t1.c FROM t1, t2 WHERE t1.a = t2.a AND (t1.b<0 OR t1.b>0)
HAVING t1.c != 5 ORDER BY t1.c;
b c
1 4
diff --git a/mysql-test/main/join_cache.result b/mysql-test/main/join_cache.result
index 290aa492a98..e8fe3a10b18 100644
--- a/mysql-test/main/join_cache.result
+++ b/mysql-test/main/join_cache.result
@@ -6069,6 +6069,93 @@ f2
drop table t1, t2;
set join_buffer_size=@save_join_buffer_size;
#
+# MDEV-21104: BNLH used for multi-join query with embedded outer join
+# and possible 'not exists' optimization
+#
+set join_cache_level=4;
+CREATE TABLE t1 (a int) ENGINE=MyISAM;
+INSERT INTO t1 VALUES (1),(2);
+CREATE TABLE t2 (b int, c int) ENGINE=MyISAM;
+INSERT INTO t2 VALUES (1,2),(2,4);
+CREATE TABLE t3 (d int, KEY(d)) ENGINE=MyISAM;
+INSERT INTO t3 VALUES (1),(2);
+CREATE TABLE t4 (e int primary key) ENGINE=MyISAM;
+INSERT INTO t4 VALUES (1),(2);
+ANALYZE TABLE t1,t2,t3,t4;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+test.t2 analyze status Engine-independent statistics collected
+test.t2 analyze status OK
+test.t3 analyze status Engine-independent statistics collected
+test.t3 analyze status OK
+test.t4 analyze status Engine-independent statistics collected
+test.t4 analyze status OK
+SELECT * FROM t2 LEFT JOIN t3 ON c = d;
+b c d
+1 2 2
+2 4 NULL
+SELECT * FROM (t2 LEFT JOIN t3 ON c = d ) JOIN t4;
+b c d e
+1 2 2 1
+2 4 NULL 1
+1 2 2 2
+2 4 NULL 2
+EXPLAIN SELECT * FROM t1 LEFT JOIN ( ( t2 LEFT JOIN t3 ON c = d ) JOIN t4 ) ON b = e;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2
+1 SIMPLE t2 ALL NULL NULL NULL NULL 2 Using where; Using join buffer (flat, BNL join)
+1 SIMPLE t3 hash_index d #hash#d:d 5:5 test.t2.c 2 Using where; Using index; Using join buffer (incremental, BNLH join)
+1 SIMPLE t4 hash_index PRIMARY #hash#PRIMARY:PRIMARY 4:4 test.t2.b 2 Using index; Using join buffer (incremental, BNLH join)
+SELECT * FROM t1 LEFT JOIN ( ( t2 LEFT JOIN t3 ON c = d ) JOIN t4 ) ON b = e;
+a b c d e
+1 1 2 2 1
+2 1 2 2 1
+1 2 4 NULL 2
+2 2 4 NULL 2
+EXPLAIN SELECT * FROM t1 LEFT JOIN ( ( t2 LEFT JOIN t3 ON c = d ) JOIN t4 ) ON b = e
+WHERE e IS NULL;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2
+1 SIMPLE t2 ALL NULL NULL NULL NULL 2 Using where; Using join buffer (flat, BNL join)
+1 SIMPLE t3 hash_index d #hash#d:d 5:5 test.t2.c 2 Using where; Using index; Using join buffer (incremental, BNLH join)
+1 SIMPLE t4 hash_index PRIMARY #hash#PRIMARY:PRIMARY 4:4 test.t2.b 2 Using where; Using index; Not exists; Using join buffer (incremental, BNLH join)
+SELECT * FROM t1 LEFT JOIN ( ( t2 LEFT JOIN t3 ON c = d ) JOIN t4 ) ON b = e
+WHERE e IS NULL;
+a b c d e
+DROP TABLE t1,t2,t3,t4;
+set join_cache_level=@save_join_cache_level;
+#
+# MDEV-24767: forced BNLH used for equi-join supported by compound index
+#
+create table t1 (a int, b int, c int ) engine=myisam ;
+create table t2 (a int, b int, c int, primary key (c,a,b)) engine=myisam ;
+insert into t1 values (3,4,2), (5,6,4);
+insert into t2 values (3,4,2), (5,6,4);
+select t1.a, t1.b, t1.c from t1,t2
+where t2.a = t1.a and t2.b = t1.b and t2.c=t1.c;
+a b c
+3 4 2
+5 6 4
+explain select t1.a, t1.b, t1.c from t1,t2
+where t2.a = t1.a and t2.b = t1.b and t2.c=t1.c;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2 Using where
+1 SIMPLE t2 eq_ref PRIMARY PRIMARY 12 test.t1.c,test.t1.a,test.t1.b 1 Using index
+set join_cache_level=3;
+select t1.a, t1.b, t1.c from t1,t2
+where t2.a = t1.a and t2.b = t1.b and t2.c=t1.c;
+a b c
+3 4 2
+5 6 4
+explain select t1.a, t1.b, t1.c from t1,t2
+where t2.a = t1.a and t2.b = t1.b and t2.c=t1.c;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2 Using where
+1 SIMPLE t2 hash_index PRIMARY #hash#PRIMARY:PRIMARY 12:12 test.t1.c,test.t1.a,test.t1.b 2 Using index; Using join buffer (flat, BNLH join)
+drop table t1,t2;
+set join_cache_level=@save_join_cache_level;
+#
# MDEV-21243: Join buffer: condition is checked in wrong place for range access
#
create table t1(a int primary key);
@@ -6141,6 +6228,7 @@ EXPLAIN
}
}
drop table t1,t2,t3;
+# End of 10.3 tests
set @@optimizer_switch=@save_optimizer_switch;
set global innodb_stats_persistent= @innodb_stats_persistent_save;
set global innodb_stats_persistent_sample_pages=
diff --git a/mysql-test/main/join_cache.test b/mysql-test/main/join_cache.test
index ce84ccb56fc..b4271f648e3 100644
--- a/mysql-test/main/join_cache.test
+++ b/mysql-test/main/join_cache.test
@@ -4071,6 +4071,65 @@ set join_buffer_size=@save_join_buffer_size;
--echo #
+--echo # MDEV-21104: BNLH used for multi-join query with embedded outer join
+--echo # and possible 'not exists' optimization
+--echo #
+
+set join_cache_level=4;
+
+CREATE TABLE t1 (a int) ENGINE=MyISAM;
+INSERT INTO t1 VALUES (1),(2);
+CREATE TABLE t2 (b int, c int) ENGINE=MyISAM;
+INSERT INTO t2 VALUES (1,2),(2,4);
+CREATE TABLE t3 (d int, KEY(d)) ENGINE=MyISAM;
+INSERT INTO t3 VALUES (1),(2);
+CREATE TABLE t4 (e int primary key) ENGINE=MyISAM;
+INSERT INTO t4 VALUES (1),(2);
+ANALYZE TABLE t1,t2,t3,t4;
+
+SELECT * FROM t2 LEFT JOIN t3 ON c = d;
+SELECT * FROM (t2 LEFT JOIN t3 ON c = d ) JOIN t4;
+
+let $q1=
+SELECT * FROM t1 LEFT JOIN ( ( t2 LEFT JOIN t3 ON c = d ) JOIN t4 ) ON b = e;
+eval EXPLAIN $q1;
+eval $q1;
+
+let $q2=
+SELECT * FROM t1 LEFT JOIN ( ( t2 LEFT JOIN t3 ON c = d ) JOIN t4 ) ON b = e
+ WHERE e IS NULL;
+eval EXPLAIN $q2;
+eval $q2;
+
+DROP TABLE t1,t2,t3,t4;
+
+set join_cache_level=@save_join_cache_level;
+
+--echo #
+--echo # MDEV-24767: forced BNLH used for equi-join supported by compound index
+--echo #
+
+create table t1 (a int, b int, c int ) engine=myisam ;
+create table t2 (a int, b int, c int, primary key (c,a,b)) engine=myisam ;
+insert into t1 values (3,4,2), (5,6,4);
+insert into t2 values (3,4,2), (5,6,4);
+
+let $q=
+select t1.a, t1.b, t1.c from t1,t2
+ where t2.a = t1.a and t2.b = t1.b and t2.c=t1.c;
+
+eval $q;
+eval explain $q;
+
+set join_cache_level=3;
+eval $q;
+eval explain $q;
+
+drop table t1,t2;
+
+set join_cache_level=@save_join_cache_level;
+
+--echo #
--echo # MDEV-21243: Join buffer: condition is checked in wrong place for range access
--echo #
create table t1(a int primary key);
@@ -4109,7 +4168,9 @@ where
drop table t1,t2,t3;
-# The following command must be the last one in the file
+--echo # End of 10.3 tests
+
+# The following command must be the last one in the file
set @@optimizer_switch=@save_optimizer_switch;
set global innodb_stats_persistent= @innodb_stats_persistent_save;
diff --git a/mysql-test/main/join_outer.result b/mysql-test/main/join_outer.result
index c92b8e6115b..b7fcb55e4fe 100644
--- a/mysql-test/main/join_outer.result
+++ b/mysql-test/main/join_outer.result
@@ -2687,6 +2687,77 @@ id timestamp modifiedBy id REV REVTYPE profile_id id REV person_id id REV
DROP TABLE t1,t2,t3,t4;
# end of 10.1 tests
#
+# MDEV-25362: name resolution for subqueries in ON expressions
+#
+create table t1 (a int, b int);
+create table t2 (c int, d int);
+create table t3 (e int, f int);
+create table t4 (g int, h int);
+explain
+select *
+from
+t1 left join
+(t2
+join
+t3 on
+(t3.f=t1.a)
+) on (t2.c=t1.a );
+ERROR 42S22: Unknown column 't1.a' in 'on clause'
+explain
+select *
+from
+t1 left join
+(t2
+join
+t3 on
+(t3.f=(select max(g) from t4 where t4.h=t1.a))
+) on (t2.c=t1.a );
+ERROR 42S22: Unknown column 't1.a' in 'where clause'
+drop table t1,t2,t3,t4;
+create table t1 (a int);
+insert into t1 values (1),(2);
+create table t2 (b int);
+insert into t2 values (1),(2);
+create table t3 (c int);
+insert into t3 values (1),(2);
+select * from ( select * from t1 left join t2
+on b in (select x from t3 as sq1)
+) as sq2;
+ERROR 42S22: Unknown column 'x' in 'field list'
+drop table t1,t2,t3;
+# end of 10.2 tests
+#
+# MDEV-22866: Crash in join optimizer with constant outer join nest
+#
+CREATE TABLE t1 (a INT) ENGINE=MyISAM;
+INSERT INTO t1 VALUES (1),(2);
+CREATE TABLE t2 (b INT) ENGINE=MyISAM;
+INSERT INTO t2 VALUES (3),(4);
+CREATE TABLE t3 (c INT, KEY(c)) ENGINE=MyISAM;
+CREATE TABLE t4 (d INT, KEY(d)) ENGINE=MyISAM;
+INSERT INTO t4 VALUES (5),(6);
+CREATE TABLE t5 (e INT) ENGINE=MyISAM;
+INSERT INTO t5 VALUES (7),(8);
+CREATE TABLE t6 (f INT) ENGINE=MyISAM;
+INSERT INTO t6 VALUES (9),(10);
+SELECT *
+FROM
+t1
+LEFT JOIN (
+t2 LEFT JOIN (
+t3 JOIN
+t4 ON t3.c = t4.d and t3.c >2 and t3.c<0
+) ON t2.b >= t4.d
+) ON t1.a <= t2.b
+LEFT JOIN t5 ON t2.b = t5.e
+LEFT JOIN t6 ON t3.c = t6.f;
+a b c d e f
+1 3 NULL NULL NULL NULL
+1 4 NULL NULL NULL NULL
+2 3 NULL NULL NULL NULL
+2 4 NULL NULL NULL NULL
+drop table t1,t2,t3,t4,t5,t6;
+#
# MDEV-17518: Range optimization doesn't use ON expressions from nested outer joins
#
create table t1(a int);
@@ -2754,35 +2825,5 @@ WHERE t3.pk IN (2);
1
drop view v4;
drop table t1,t2,t3,t4;
+# end of 10.3 tests
SET optimizer_switch=@org_optimizer_switch;
-#
-# MDEV-22866: Crash in join optimizer with constant outer join nest
-#
-CREATE TABLE t1 (a INT) ENGINE=MyISAM;
-INSERT INTO t1 VALUES (1),(2);
-CREATE TABLE t2 (b INT) ENGINE=MyISAM;
-INSERT INTO t2 VALUES (3),(4);
-CREATE TABLE t3 (c INT, KEY(c)) ENGINE=MyISAM;
-CREATE TABLE t4 (d INT, KEY(d)) ENGINE=MyISAM;
-INSERT INTO t4 VALUES (5),(6);
-CREATE TABLE t5 (e INT) ENGINE=MyISAM;
-INSERT INTO t5 VALUES (7),(8);
-CREATE TABLE t6 (f INT) ENGINE=MyISAM;
-INSERT INTO t6 VALUES (9),(10);
-SELECT *
-FROM
-t1
-LEFT JOIN (
-t2 LEFT JOIN (
-t3 JOIN
-t4 ON t3.c = t4.d and t3.c >2 and t3.c<0
-) ON t2.b >= t4.d
-) ON t1.a <= t2.b
-LEFT JOIN t5 ON t2.b = t5.e
-LEFT JOIN t6 ON t3.c = t6.f;
-a b c d e f
-1 3 NULL NULL NULL NULL
-2 3 NULL NULL NULL NULL
-1 4 NULL NULL NULL NULL
-2 4 NULL NULL NULL NULL
-drop table t1,t2,t3,t4,t5,t6;
diff --git a/mysql-test/main/join_outer.test b/mysql-test/main/join_outer.test
index f835d8af5a8..82c7b265b56 100644
--- a/mysql-test/main/join_outer.test
+++ b/mysql-test/main/join_outer.test
@@ -2192,6 +2192,91 @@ DROP TABLE t1,t2,t3,t4;
--echo # end of 10.1 tests
--echo #
+--echo # MDEV-25362: name resolution for subqueries in ON expressions
+--echo #
+
+create table t1 (a int, b int);
+create table t2 (c int, d int);
+create table t3 (e int, f int);
+create table t4 (g int, h int);
+
+--error ER_BAD_FIELD_ERROR
+explain
+select *
+from
+ t1 left join
+ (t2
+ join
+ t3 on
+ (t3.f=t1.a)
+ ) on (t2.c=t1.a );
+
+# This must produce an error:
+--error ER_BAD_FIELD_ERROR
+explain
+select *
+from
+ t1 left join
+ (t2
+ join
+ t3 on
+ (t3.f=(select max(g) from t4 where t4.h=t1.a))
+ ) on (t2.c=t1.a );
+
+drop table t1,t2,t3,t4;
+
+create table t1 (a int);
+insert into t1 values (1),(2);
+create table t2 (b int);
+insert into t2 values (1),(2);
+create table t3 (c int);
+insert into t3 values (1),(2);
+
+--error ER_BAD_FIELD_ERROR
+select * from ( select * from t1 left join t2
+ on b in (select x from t3 as sq1)
+ ) as sq2;
+
+drop table t1,t2,t3;
+
+--echo # end of 10.2 tests
+
+--echo #
+--echo # MDEV-22866: Crash in join optimizer with constant outer join nest
+--echo #
+
+CREATE TABLE t1 (a INT) ENGINE=MyISAM;
+INSERT INTO t1 VALUES (1),(2);
+
+CREATE TABLE t2 (b INT) ENGINE=MyISAM;
+INSERT INTO t2 VALUES (3),(4);
+
+CREATE TABLE t3 (c INT, KEY(c)) ENGINE=MyISAM;
+
+CREATE TABLE t4 (d INT, KEY(d)) ENGINE=MyISAM;
+INSERT INTO t4 VALUES (5),(6);
+
+CREATE TABLE t5 (e INT) ENGINE=MyISAM;
+INSERT INTO t5 VALUES (7),(8);
+
+CREATE TABLE t6 (f INT) ENGINE=MyISAM;
+INSERT INTO t6 VALUES (9),(10);
+
+SELECT *
+FROM
+ t1
+ LEFT JOIN (
+ t2 LEFT JOIN (
+ t3 JOIN
+ t4 ON t3.c = t4.d and t3.c >2 and t3.c<0
+ ) ON t2.b >= t4.d
+ ) ON t1.a <= t2.b
+ LEFT JOIN t5 ON t2.b = t5.e
+ LEFT JOIN t6 ON t3.c = t6.f;
+
+drop table t1,t2,t3,t4,t5,t6;
+
+--echo #
--echo # MDEV-17518: Range optimization doesn't use ON expressions from nested outer joins
--echo #
create table t1(a int);
@@ -2251,39 +2336,6 @@ WHERE t3.pk IN (2);
drop view v4;
drop table t1,t2,t3,t4;
-SET optimizer_switch=@org_optimizer_switch;
-
---echo #
---echo # MDEV-22866: Crash in join optimizer with constant outer join nest
---echo #
-
-CREATE TABLE t1 (a INT) ENGINE=MyISAM;
-INSERT INTO t1 VALUES (1),(2);
+--echo # end of 10.3 tests
-CREATE TABLE t2 (b INT) ENGINE=MyISAM;
-INSERT INTO t2 VALUES (3),(4);
-
-CREATE TABLE t3 (c INT, KEY(c)) ENGINE=MyISAM;
-
-CREATE TABLE t4 (d INT, KEY(d)) ENGINE=MyISAM;
-INSERT INTO t4 VALUES (5),(6);
-
-CREATE TABLE t5 (e INT) ENGINE=MyISAM;
-INSERT INTO t5 VALUES (7),(8);
-
-CREATE TABLE t6 (f INT) ENGINE=MyISAM;
-INSERT INTO t6 VALUES (9),(10);
-
-SELECT *
-FROM
- t1
- LEFT JOIN (
- t2 LEFT JOIN (
- t3 JOIN
- t4 ON t3.c = t4.d and t3.c >2 and t3.c<0
- ) ON t2.b >= t4.d
- ) ON t1.a <= t2.b
- LEFT JOIN t5 ON t2.b = t5.e
- LEFT JOIN t6 ON t3.c = t6.f;
-
-drop table t1,t2,t3,t4,t5,t6;
+SET optimizer_switch=@org_optimizer_switch;
diff --git a/mysql-test/main/join_outer_jcl6.result b/mysql-test/main/join_outer_jcl6.result
index 2c7dc7b85bb..3cb846426fe 100644
--- a/mysql-test/main/join_outer_jcl6.result
+++ b/mysql-test/main/join_outer_jcl6.result
@@ -2694,6 +2694,77 @@ id timestamp modifiedBy id REV REVTYPE profile_id id REV person_id id REV
DROP TABLE t1,t2,t3,t4;
# end of 10.1 tests
#
+# MDEV-25362: name resolution for subqueries in ON expressions
+#
+create table t1 (a int, b int);
+create table t2 (c int, d int);
+create table t3 (e int, f int);
+create table t4 (g int, h int);
+explain
+select *
+from
+t1 left join
+(t2
+join
+t3 on
+(t3.f=t1.a)
+) on (t2.c=t1.a );
+ERROR 42S22: Unknown column 't1.a' in 'on clause'
+explain
+select *
+from
+t1 left join
+(t2
+join
+t3 on
+(t3.f=(select max(g) from t4 where t4.h=t1.a))
+) on (t2.c=t1.a );
+ERROR 42S22: Unknown column 't1.a' in 'where clause'
+drop table t1,t2,t3,t4;
+create table t1 (a int);
+insert into t1 values (1),(2);
+create table t2 (b int);
+insert into t2 values (1),(2);
+create table t3 (c int);
+insert into t3 values (1),(2);
+select * from ( select * from t1 left join t2
+on b in (select x from t3 as sq1)
+) as sq2;
+ERROR 42S22: Unknown column 'x' in 'field list'
+drop table t1,t2,t3;
+# end of 10.2 tests
+#
+# MDEV-22866: Crash in join optimizer with constant outer join nest
+#
+CREATE TABLE t1 (a INT) ENGINE=MyISAM;
+INSERT INTO t1 VALUES (1),(2);
+CREATE TABLE t2 (b INT) ENGINE=MyISAM;
+INSERT INTO t2 VALUES (3),(4);
+CREATE TABLE t3 (c INT, KEY(c)) ENGINE=MyISAM;
+CREATE TABLE t4 (d INT, KEY(d)) ENGINE=MyISAM;
+INSERT INTO t4 VALUES (5),(6);
+CREATE TABLE t5 (e INT) ENGINE=MyISAM;
+INSERT INTO t5 VALUES (7),(8);
+CREATE TABLE t6 (f INT) ENGINE=MyISAM;
+INSERT INTO t6 VALUES (9),(10);
+SELECT *
+FROM
+t1
+LEFT JOIN (
+t2 LEFT JOIN (
+t3 JOIN
+t4 ON t3.c = t4.d and t3.c >2 and t3.c<0
+) ON t2.b >= t4.d
+) ON t1.a <= t2.b
+LEFT JOIN t5 ON t2.b = t5.e
+LEFT JOIN t6 ON t3.c = t6.f;
+a b c d e f
+1 3 NULL NULL NULL NULL
+2 3 NULL NULL NULL NULL
+1 4 NULL NULL NULL NULL
+2 4 NULL NULL NULL NULL
+drop table t1,t2,t3,t4,t5,t6;
+#
# MDEV-17518: Range optimization doesn't use ON expressions from nested outer joins
#
create table t1(a int);
@@ -2761,35 +2832,5 @@ WHERE t3.pk IN (2);
1
drop view v4;
drop table t1,t2,t3,t4;
+# end of 10.3 tests
SET optimizer_switch=@org_optimizer_switch;
-#
-# MDEV-22866: Crash in join optimizer with constant outer join nest
-#
-CREATE TABLE t1 (a INT) ENGINE=MyISAM;
-INSERT INTO t1 VALUES (1),(2);
-CREATE TABLE t2 (b INT) ENGINE=MyISAM;
-INSERT INTO t2 VALUES (3),(4);
-CREATE TABLE t3 (c INT, KEY(c)) ENGINE=MyISAM;
-CREATE TABLE t4 (d INT, KEY(d)) ENGINE=MyISAM;
-INSERT INTO t4 VALUES (5),(6);
-CREATE TABLE t5 (e INT) ENGINE=MyISAM;
-INSERT INTO t5 VALUES (7),(8);
-CREATE TABLE t6 (f INT) ENGINE=MyISAM;
-INSERT INTO t6 VALUES (9),(10);
-SELECT *
-FROM
-t1
-LEFT JOIN (
-t2 LEFT JOIN (
-t3 JOIN
-t4 ON t3.c = t4.d and t3.c >2 and t3.c<0
-) ON t2.b >= t4.d
-) ON t1.a <= t2.b
-LEFT JOIN t5 ON t2.b = t5.e
-LEFT JOIN t6 ON t3.c = t6.f;
-a b c d e f
-1 3 NULL NULL NULL NULL
-2 3 NULL NULL NULL NULL
-1 4 NULL NULL NULL NULL
-2 4 NULL NULL NULL NULL
-drop table t1,t2,t3,t4,t5,t6;
diff --git a/mysql-test/main/kill.result b/mysql-test/main/kill.result
index 1ea06aee096..cbb0598485f 100644
--- a/mysql-test/main/kill.result
+++ b/mysql-test/main/kill.result
@@ -413,3 +413,8 @@ ALTER TABLE t2 DROP c;
UNLOCK TABLES;
DROP VIEW v1;
DROP TABLE t1, t2;
+#
+# KILL QUERY ID USER
+#
+kill query id user 'foo';
+ERROR 42S22: Unknown column 'user' in 'field list'
diff --git a/mysql-test/main/kill.test b/mysql-test/main/kill.test
index c5bbd349574..05e70ff8651 100644
--- a/mysql-test/main/kill.test
+++ b/mysql-test/main/kill.test
@@ -664,3 +664,9 @@ ALTER TABLE t2 DROP c;
UNLOCK TABLES;
DROP VIEW v1;
DROP TABLE t1, t2;
+
+--echo #
+--echo # KILL QUERY ID USER
+--echo #
+--error ER_BAD_FIELD_ERROR
+kill query id user 'foo';
diff --git a/mysql-test/main/lock_user.result b/mysql-test/main/lock_user.result
index 7d9aeebb7aa..560ae6ce425 100644
--- a/mysql-test/main/lock_user.result
+++ b/mysql-test/main/lock_user.result
@@ -156,6 +156,7 @@ alter user user1@localhost PASSWORD EXPIRE NEVER ACCOUNT UNLOCK ;
show create user user1@localhost;
CREATE USER for user1@localhost
CREATE USER `user1`@`localhost` PASSWORD EXPIRE
+ALTER USER `user1`@`localhost` PASSWORD EXPIRE NEVER
alter user user1@localhost ACCOUNT LOCK PASSWORD EXPIRE DEFAULT;
show create user user1@localhost;
CREATE USER for user1@localhost
@@ -167,5 +168,6 @@ localhost user1 {"access":0,"plugin":"mysql_native_password","authentication_str
show create user user1@localhost;
CREATE USER for user1@localhost
CREATE USER `user1`@`localhost` PASSWORD EXPIRE
+ALTER USER `user1`@`localhost` PASSWORD EXPIRE INTERVAL 60 DAY
drop user user1@localhost;
drop user user2@localhost;
diff --git a/mysql-test/main/lock_view.result b/mysql-test/main/lock_view.result
index 4d375bace42..364c2cddf60 100644
--- a/mysql-test/main/lock_view.result
+++ b/mysql-test/main/lock_view.result
@@ -229,3 +229,37 @@ drop user definer@localhost;
drop database mysqltest1;
drop database mysqltest2;
drop database mysqltest3;
+#
+# MDEV-24331 mysqldump fails with "Got error: 1356" if the database contains a view with a subquery
+#
+create user u1@localhost;
+grant all privileges on test.* to u1@localhost;
+connect con1,localhost,u1;
+use test;
+create table t1 (id int not null);
+create view v1 as select * from (select * from t1) dt;
+lock table v1 read;
+disconnect con1;
+connection default;
+SET @saved_cs_client = @@character_set_client;
+SET character_set_client = utf8;
+/*!50001 CREATE TABLE `v1` (
+ `id` tinyint NOT NULL
+) ENGINE=MyISAM */;
+SET character_set_client = @saved_cs_client;
+/*!50001 DROP TABLE IF EXISTS `v1`*/;
+/*!50001 SET @saved_cs_client = @@character_set_client */;
+/*!50001 SET @saved_cs_results = @@character_set_results */;
+/*!50001 SET @saved_col_connection = @@collation_connection */;
+/*!50001 SET character_set_client = latin1 */;
+/*!50001 SET character_set_results = latin1 */;
+/*!50001 SET collation_connection = latin1_swedish_ci */;
+/*!50001 CREATE ALGORITHM=UNDEFINED */
+/*!50013 DEFINER=`u1`@`localhost` SQL SECURITY DEFINER */
+/*!50001 VIEW `v1` AS select `dt`.`id` AS `id` from (select `test`.`t1`.`id` AS `id` from `test`.`t1`) `dt` */;
+/*!50001 SET character_set_client = @saved_cs_client */;
+/*!50001 SET character_set_results = @saved_cs_results */;
+/*!50001 SET collation_connection = @saved_col_connection */;
+drop view v1;
+drop table t1;
+drop user u1@localhost;
diff --git a/mysql-test/main/lock_view.test b/mysql-test/main/lock_view.test
index 4b1adac5be1..abb8d317946 100644
--- a/mysql-test/main/lock_view.test
+++ b/mysql-test/main/lock_view.test
@@ -75,3 +75,20 @@ drop user definer@localhost;
drop database mysqltest1;
drop database mysqltest2;
drop database mysqltest3;
+
+--echo #
+--echo # MDEV-24331 mysqldump fails with "Got error: 1356" if the database contains a view with a subquery
+--echo #
+create user u1@localhost;
+grant all privileges on test.* to u1@localhost;
+connect con1,localhost,u1;
+use test;
+create table t1 (id int not null);
+create view v1 as select * from (select * from t1) dt;
+lock table v1 read;
+disconnect con1;
+connection default;
+exec $MYSQL_DUMP test v1 -uu1 --compact;
+drop view v1;
+drop table t1;
+drop user u1@localhost;
diff --git a/mysql-test/main/long_unique.result b/mysql-test/main/long_unique.result
index efa0444fff5..eb61a080c96 100644
--- a/mysql-test/main/long_unique.result
+++ b/mysql-test/main/long_unique.result
@@ -1439,7 +1439,7 @@ drop table t1;
create table t1(a varchar(4000));
alter table t1 add index(a);
Warnings:
-Warning 1071 Specified key was too long; max key length is 1000 bytes
+Note 1071 Specified key was too long; max key length is 1000 bytes
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
diff --git a/mysql-test/main/long_unique_bugs.result b/mysql-test/main/long_unique_bugs.result
index 72dab5bb181..5d6c0562c8a 100644
--- a/mysql-test/main/long_unique_bugs.result
+++ b/mysql-test/main/long_unique_bugs.result
@@ -148,7 +148,7 @@ ALTER TABLE t1 DROP KEY f, ADD INDEX idx1(f), ALGORITHM=INSTANT;
ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: ADD INDEX. Try ALGORITHM=NOCOPY
ALTER TABLE t1 ADD KEY idx2(f);
Warnings:
-Warning 1071 Specified key was too long; max key length is 3072 bytes
+Note 1071 Specified key was too long; max key length is 3072 bytes
DROP TABLE t1;
CREATE TABLE t1(a blob , b blob , unique(a,b));
alter table t1 drop column b;
diff --git a/mysql-test/main/mix2_myisam.result b/mysql-test/main/mix2_myisam.result
index 5acec2616fa..220d14f3b0e 100644
--- a/mysql-test/main/mix2_myisam.result
+++ b/mysql-test/main/mix2_myisam.result
@@ -1990,7 +1990,7 @@ a b
drop table t1;
create table t1 (v varchar(65530), key(v));
Warnings:
-Warning 1071 Specified key was too long; max key length is 1000 bytes
+Note 1071 Specified key was too long; max key length is 1000 bytes
drop table t1;
SET STATEMENT sql_mode = 'NO_ENGINE_SUBSTITUTION' FOR
create table t1 (v varchar(65536));
diff --git a/mysql-test/main/multi_update.result b/mysql-test/main/multi_update.result
index d792b2828b8..4001a47ecd4 100644
--- a/mysql-test/main/multi_update.result
+++ b/mysql-test/main/multi_update.result
@@ -982,6 +982,59 @@ drop function f1;
#
# end of 5.5 tests
#
+#
+# MDEV-24823: Invalid multi-table update of view within SP
+#
+create table t1 (id int) engine=myisam;
+insert into t1 values (1),(2),(1);
+create table t2 (pk int, c0 int) engine=myisam;
+insert into t2 values (1,1), (2,3);
+create view v2 as select * from t2;
+create view v3 as select * from t2 where c0 < 3;
+create procedure sp0() update t1, v2 set v2.pk = 1 where v2.c0 = t1.c1;
+call sp0();
+ERROR 42S22: Unknown column 't1.c1' in 'where clause'
+call sp0();
+ERROR 42S22: Unknown column 't1.c1' in 'where clause'
+create procedure sp1() update (t1 join v2 on v2.c0 = t1.c1) set v2.pk = 1;
+call sp1();
+ERROR 42S22: Unknown column 't1.c1' in 'on clause'
+call sp1();
+ERROR 42S22: Unknown column 't1.c1' in 'on clause'
+create procedure sp2() update (t1 join v3 on v3.c0 = t1.c1) set v3.pk = 1;
+call sp2();
+ERROR 42S22: Unknown column 't1.c1' in 'on clause'
+call sp2();
+ERROR 42S22: Unknown column 't1.c1' in 'on clause'
+create procedure sp3()
+update (t1 join v2 on v2.c0 = t1.id) set v2.c0 = v2.c0+1;
+select * from t2;
+pk c0
+1 1
+2 3
+call sp3();
+select * from t2;
+pk c0
+1 2
+2 3
+call sp3();
+select * from t2;
+pk c0
+1 3
+2 3
+create procedure sp4() delete t1 from t1 join v2 on v2.c0 = t1.c1;
+call sp4();
+ERROR 42S22: Unknown column 't1.c1' in 'on clause'
+call sp4();
+ERROR 42S22: Unknown column 't1.c1' in 'on clause'
+drop procedure sp0;
+drop procedure sp1;
+drop procedure sp2;
+drop procedure sp3;
+drop procedure sp4;
+drop view v2,v3;
+drop table t1,t2;
+# End of 10.2 tests
create table t1 (c1 int, c3 int);
insert t1(c3) values (1), (2), (3), (4), (5), (6), (7), (8);
create table t2 select * from t1;
diff --git a/mysql-test/main/multi_update.test b/mysql-test/main/multi_update.test
index 8a32f626818..84f06a7c165 100644
--- a/mysql-test/main/multi_update.test
+++ b/mysql-test/main/multi_update.test
@@ -958,6 +958,59 @@ drop function f1;
--echo # end of 5.5 tests
--echo #
+--echo #
+--echo # MDEV-24823: Invalid multi-table update of view within SP
+--echo #
+
+create table t1 (id int) engine=myisam;
+insert into t1 values (1),(2),(1);
+create table t2 (pk int, c0 int) engine=myisam;
+insert into t2 values (1,1), (2,3);
+create view v2 as select * from t2;
+create view v3 as select * from t2 where c0 < 3;
+
+create procedure sp0() update t1, v2 set v2.pk = 1 where v2.c0 = t1.c1;
+--error ER_BAD_FIELD_ERROR
+call sp0();
+--error ER_BAD_FIELD_ERROR
+call sp0();
+
+create procedure sp1() update (t1 join v2 on v2.c0 = t1.c1) set v2.pk = 1;
+--error ER_BAD_FIELD_ERROR
+call sp1();
+--error ER_BAD_FIELD_ERROR
+call sp1();
+
+create procedure sp2() update (t1 join v3 on v3.c0 = t1.c1) set v3.pk = 1;
+--error ER_BAD_FIELD_ERROR
+call sp2();
+--error ER_BAD_FIELD_ERROR
+call sp2();
+
+create procedure sp3()
+update (t1 join v2 on v2.c0 = t1.id) set v2.c0 = v2.c0+1;
+select * from t2;
+call sp3();
+select * from t2;
+call sp3();
+select * from t2;
+
+create procedure sp4() delete t1 from t1 join v2 on v2.c0 = t1.c1;
+--error ER_BAD_FIELD_ERROR
+call sp4();
+--error ER_BAD_FIELD_ERROR
+call sp4();
+
+drop procedure sp0;
+drop procedure sp1;
+drop procedure sp2;
+drop procedure sp3;
+drop procedure sp4;
+drop view v2,v3;
+drop table t1,t2;
+
+--echo # End of 10.2 tests
+
#
# MDEV-13911 Support ORDER BY and LIMIT in multi-table update
#
diff --git a/mysql-test/main/myisam.result b/mysql-test/main/myisam.result
index 7556e64936a..32737bd8399 100644
--- a/mysql-test/main/myisam.result
+++ b/mysql-test/main/myisam.result
@@ -1706,7 +1706,7 @@ a b
drop table t1;
create table t1 (v varchar(65530), key(v));
Warnings:
-Warning 1071 Specified key was too long; max key length is 1000 bytes
+Note 1071 Specified key was too long; max key length is 1000 bytes
drop table if exists t1;
set statement sql_mode = 'NO_ENGINE_SUBSTITUTION' for
create table t1 (v varchar(65536));
@@ -1980,7 +1980,7 @@ t1 CREATE TABLE `t1` (
drop table t1;
create table t1 (a varchar(2048), key `a` (a));
Warnings:
-Warning 1071 Specified key was too long; max key length is 1000 bytes
+Note 1071 Specified key was too long; max key length is 1000 bytes
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
@@ -1990,7 +1990,7 @@ t1 CREATE TABLE `t1` (
drop table t1;
create table t1 (a varchar(2048), key `a` (a) key_block_size=1024);
Warnings:
-Warning 1071 Specified key was too long; max key length is 1000 bytes
+Note 1071 Specified key was too long; max key length is 1000 bytes
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
@@ -2000,7 +2000,7 @@ t1 CREATE TABLE `t1` (
drop table t1;
create table t1 (a int not null, b varchar(2048), key (a), key(b)) key_block_size=1024;
Warnings:
-Warning 1071 Specified key was too long; max key length is 1000 bytes
+Note 1071 Specified key was too long; max key length is 1000 bytes
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
@@ -2046,7 +2046,7 @@ t1 CREATE TABLE `t1` (
drop table t1;
create table t1 (a int not null, b varchar(2048), key (a), key(b)) key_block_size=8192;
Warnings:
-Warning 1071 Specified key was too long; max key length is 1000 bytes
+Note 1071 Specified key was too long; max key length is 1000 bytes
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
@@ -2136,7 +2136,7 @@ t1 CREATE TABLE `t1` (
drop table t1;
create table t1 (a int not null, b varchar(2048), key (a) key_block_size=1024, key(b)) key_block_size=8192;
Warnings:
-Warning 1071 Specified key was too long; max key length is 1000 bytes
+Note 1071 Specified key was too long; max key length is 1000 bytes
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
@@ -2166,7 +2166,7 @@ t1 CREATE TABLE `t1` (
drop table t1;
create table t1 (a varchar(2048), key `a` (a) key_block_size=1000000000000000000);
Warnings:
-Warning 1071 Specified key was too long; max key length is 1000 bytes
+Note 1071 Specified key was too long; max key length is 1000 bytes
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
@@ -2412,6 +2412,7 @@ Key Start Len Index Type
1 2 30 multip. varchar
2 33 30 multip. char NULL
DROP TABLE t1;
+set statement sql_mode='' for
create table t1 (n int not null, c char(1)) transactional=1;
Warnings:
Warning 1478 Table storage engine 'MyISAM' does not support the create option 'TRANSACTIONAL=1'
diff --git a/mysql-test/main/myisam.test b/mysql-test/main/myisam.test
index 0b153514ecb..9b81ab197ae 100644
--- a/mysql-test/main/myisam.test
+++ b/mysql-test/main/myisam.test
@@ -1557,6 +1557,7 @@ DROP TABLE t1;
# MariaDB: Note that the table will still have 'TRANSACTIONAL=1' attribute.
# That's the intended behavior atm.
#
+set statement sql_mode='' for
create table t1 (n int not null, c char(1)) transactional=1;
show create table t1;
drop table t1;
diff --git a/mysql-test/main/myisam_icp.result b/mysql-test/main/myisam_icp.result
index 7f34b274764..68770229926 100644
--- a/mysql-test/main/myisam_icp.result
+++ b/mysql-test/main/myisam_icp.result
@@ -448,11 +448,11 @@ c1 INT NOT NULL,
PRIMARY KEY (pk)
);
INSERT INTO t1 VALUES (1,9),(2,7),(3,6),(4,3),(5,1);
-EXPLAIN SELECT pk, c1 FROM t1 WHERE pk <> 3;
+EXPLAIN SELECT pk, c1 FROM t1 WHERE (pk<3 or pk>3);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL PRIMARY NULL NULL NULL 5 Using where
SET SESSION optimizer_switch='index_condition_pushdown=off';
-SELECT pk, c1 FROM t1 WHERE pk <> 3;
+SELECT pk, c1 FROM t1 WHERE (pk<3 or pk>3);
pk c1
1 9
2 7
@@ -680,23 +680,23 @@ INSERT INTO t2 VALUES
('Ill'), ('eckqzsflbzaffti'), ('w'), ('she'), ('gxbwypqtjzwywwer'), ('w');
SET SESSION optimizer_switch='index_condition_pushdown=off';
EXPLAIN
-SELECT t1.b, t1.c FROM t1, t2 WHERE t1.a = t2.a AND t1.b != 0
+SELECT t1.b, t1.c FROM t1, t2 WHERE t1.a = t2.a AND (t1.b<0 OR t1.b>0)
HAVING t1.c != 5 ORDER BY t1.c;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 system PRIMARY NULL NULL NULL 1
1 SIMPLE t2 ref a a 515 const 1 Using where
-SELECT t1.b, t1.c FROM t1, t2 WHERE t1.a = t2.a AND t1.b != 0
+SELECT t1.b, t1.c FROM t1, t2 WHERE t1.a = t2.a AND (t1.b<0 OR t1.b>0)
HAVING t1.c != 5 ORDER BY t1.c;
b c
1 4
SET SESSION optimizer_switch='index_condition_pushdown=on';
EXPLAIN
-SELECT t1.b, t1.c FROM t1, t2 WHERE t1.a = t2.a AND t1.b != 0
+SELECT t1.b, t1.c FROM t1, t2 WHERE t1.a = t2.a AND (t1.b<0 OR t1.b>0)
HAVING t1.c != 5 ORDER BY t1.c;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 system PRIMARY NULL NULL NULL 1
1 SIMPLE t2 ref a a 515 const 1 Using where
-SELECT t1.b, t1.c FROM t1, t2 WHERE t1.a = t2.a AND t1.b != 0
+SELECT t1.b, t1.c FROM t1, t2 WHERE t1.a = t2.a AND (t1.b<0 OR t1.b>0)
HAVING t1.c != 5 ORDER BY t1.c;
b c
1 4
diff --git a/mysql-test/main/mysql_upgrade.result b/mysql-test/main/mysql_upgrade.result
index 6d5df6d90de..596b2673d82 100644
--- a/mysql-test/main/mysql_upgrade.result
+++ b/mysql-test/main/mysql_upgrade.result
@@ -484,8 +484,6 @@ even_longer_user_name_number_3_to_test_the_grantor_and_definer_field_length@loca
DROP USER very_long_user_name_number_1, very_long_user_name_number_2, even_longer_user_name_number_3_to_test_the_grantor_and_definer_field_length@localhost;
DROP PROCEDURE test.pr;
use test;
-call mtr.add_suppression('Column last_update in table `mysql`.`innodb_table_stats` is INT NOT NULL but should be');
-alter table mysql.innodb_table_stats modify last_update int not null;
create table extralongname_extralongname_extralongname_extralongname_ext (
id int(10) unsigned not null,
created_date date not null,
@@ -677,7 +675,7 @@ Phase 7/7: Running 'FLUSH PRIVILEGES'
OK
SHOW CREATE TABLE mysql.user;
View Create View character_set_client collation_connection
-user CREATE ALGORITHM=UNDEFINED DEFINER=`mariadb.sys`@`localhost` SQL SECURITY DEFINER VIEW `mysql`.`user` AS select `mysql`.`global_priv`.`Host` AS `Host`,`mysql`.`global_priv`.`User` AS `User`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.plugin') in ('mysql_native_password','mysql_old_password'),ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.authentication_string'),''),'') AS `Password`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 1,'Y','N') AS `Select_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 2,'Y','N') AS `Insert_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 4,'Y','N') AS `Update_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 8,'Y','N') AS `Delete_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 16,'Y','N') AS `Create_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 32,'Y','N') AS `Drop_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 64,'Y','N') AS `Reload_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 128,'Y','N') AS `Shutdown_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 256,'Y','N') AS `Process_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 512,'Y','N') AS `File_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 1024,'Y','N') AS `Grant_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 2048,'Y','N') AS `References_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 4096,'Y','N') AS `Index_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 8192,'Y','N') AS `Alter_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 16384,'Y','N') AS `Show_db_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 32768,'Y','N') AS `Super_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 65536,'Y','N') AS `Create_tmp_table_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 131072,'Y','N') AS `Lock_tables_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 262144,'Y','N') AS `Execute_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 524288,'Y','N') AS `Repl_slave_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 1048576,'Y','N') AS `Repl_client_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 2097152,'Y','N') AS `Create_view_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 4194304,'Y','N') AS `Show_view_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 8388608,'Y','N') AS `Create_routine_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 16777216,'Y','N') AS `Alter_routine_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 33554432,'Y','N') AS `Create_user_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 67108864,'Y','N') AS `Event_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 134217728,'Y','N') AS `Trigger_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 268435456,'Y','N') AS `Create_tablespace_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 536870912,'Y','N') AS `Delete_history_priv`,elt(ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.ssl_type'),0) + 1,'','ANY','X509','SPECIFIED') AS `ssl_type`,ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.ssl_cipher'),'') AS `ssl_cipher`,ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.x509_issuer'),'') AS `x509_issuer`,ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.x509_subject'),'') AS 
`x509_subject`,cast(ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.max_questions'),0) as unsigned) AS `max_questions`,cast(ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.max_updates'),0) as unsigned) AS `max_updates`,cast(ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.max_connections'),0) as unsigned) AS `max_connections`,cast(ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.max_user_connections'),0) as signed) AS `max_user_connections`,ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.plugin'),'') AS `plugin`,ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.authentication_string'),'') AS `authentication_string`,'N' AS `password_expired`,elt(ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.is_role'),0) + 1,'N','Y') AS `is_role`,ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.default_role'),'') AS `default_role`,cast(ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.max_statement_time'),0.0) as decimal(12,6)) AS `max_statement_time` from `mysql`.`global_priv` latin1 latin1_swedish_ci
+user CREATE ALGORITHM=UNDEFINED DEFINER=`mariadb.sys`@`localhost` SQL SECURITY DEFINER VIEW `mysql`.`user` AS select `mysql`.`global_priv`.`Host` AS `Host`,`mysql`.`global_priv`.`User` AS `User`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.plugin') in ('mysql_native_password','mysql_old_password'),ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.authentication_string'),''),'') AS `Password`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 1,'Y','N') AS `Select_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 2,'Y','N') AS `Insert_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 4,'Y','N') AS `Update_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 8,'Y','N') AS `Delete_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 16,'Y','N') AS `Create_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 32,'Y','N') AS `Drop_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 64,'Y','N') AS `Reload_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 128,'Y','N') AS `Shutdown_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 256,'Y','N') AS `Process_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 512,'Y','N') AS `File_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 1024,'Y','N') AS `Grant_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 2048,'Y','N') AS `References_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 4096,'Y','N') AS `Index_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 8192,'Y','N') AS `Alter_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 16384,'Y','N') AS `Show_db_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 32768,'Y','N') AS `Super_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 65536,'Y','N') AS `Create_tmp_table_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 131072,'Y','N') AS `Lock_tables_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 262144,'Y','N') AS `Execute_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 524288,'Y','N') AS `Repl_slave_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 1048576,'Y','N') AS `Repl_client_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 2097152,'Y','N') AS `Create_view_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 4194304,'Y','N') AS `Show_view_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 8388608,'Y','N') AS `Create_routine_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 16777216,'Y','N') AS `Alter_routine_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 33554432,'Y','N') AS `Create_user_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 67108864,'Y','N') AS `Event_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 134217728,'Y','N') AS `Trigger_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 268435456,'Y','N') AS `Create_tablespace_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 536870912,'Y','N') AS `Delete_history_priv`,elt(ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.ssl_type'),0) + 1,'','ANY','X509','SPECIFIED') AS `ssl_type`,ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.ssl_cipher'),'') AS `ssl_cipher`,ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.x509_issuer'),'') AS `x509_issuer`,ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.x509_subject'),'') AS 
`x509_subject`,cast(ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.max_questions'),0) as unsigned) AS `max_questions`,cast(ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.max_updates'),0) as unsigned) AS `max_updates`,cast(ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.max_connections'),0) as unsigned) AS `max_connections`,cast(ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.max_user_connections'),0) as signed) AS `max_user_connections`,ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.plugin'),'') AS `plugin`,ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.authentication_string'),'') AS `authentication_string`,if(ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.password_last_changed'),1) = 0,'Y','N') AS `password_expired`,elt(ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.is_role'),0) + 1,'N','Y') AS `is_role`,ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.default_role'),'') AS `default_role`,cast(ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.max_statement_time'),0.0) as decimal(12,6)) AS `max_statement_time` from `mysql`.`global_priv` latin1 latin1_swedish_ci
CREATE ROLE `aRole`;
SET ROLE `aRole`;
FLUSH PRIVILEGES;
@@ -690,8 +688,88 @@ root N
root N
root N
aRole Y
+DROP ROLE aRole;
+#
+# MDEV-24122 Fix previously MySQL-5.7 data directories that upgraded prior to MDEV-23201
+#
+# switching from mysql.global_priv to mysql.user
+drop view mysql.user_bak;
+drop table mysql.user;
+truncate table mysql.tables_priv;
+FLUSH TABLES mysql.user;
+ALTER TABLE mysql.user ADD is_role enum('N', 'Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL;
+ALTER TABLE mysql.user ADD default_role char(80) binary DEFAULT '' NOT NULL;
+ALTER TABLE mysql.user ADD max_statement_time decimal(12,6) DEFAULT 0 NOT NULL;
+FLUSH PRIVILEGES;
+Phase 1/7: Checking and upgrading mysql database
+Processing databases
+mysql
+mysql.column_stats OK
+mysql.columns_priv OK
+mysql.db OK
+mysql.event OK
+mysql.func OK
+mysql.global_priv_bak OK
+mysql.gtid_slave_pos OK
+mysql.help_category OK
+mysql.help_keyword OK
+mysql.help_relation OK
+mysql.help_topic OK
+mysql.index_stats OK
+mysql.innodb_index_stats OK
+mysql.innodb_table_stats OK
+mysql.plugin OK
+mysql.proc OK
+mysql.procs_priv OK
+mysql.proxies_priv OK
+mysql.roles_mapping OK
+mysql.servers OK
+mysql.table_stats OK
+mysql.tables_priv OK
+mysql.time_zone OK
+mysql.time_zone_leap_second OK
+mysql.time_zone_name OK
+mysql.time_zone_transition OK
+mysql.time_zone_transition_type OK
+mysql.transaction_registry OK
+mysql.user OK
+Phase 2/7: Installing used storage engines... Skipped
+Phase 3/7: Fixing views
+Phase 4/7: Running 'mysql_fix_privilege_tables'
+Phase 5/7: Fixing table and database names
+Phase 6/7: Checking and upgrading tables
+Processing databases
+information_schema
+mtr
+mtr.global_suppressions OK
+mtr.test_suppressions OK
+performance_schema
+test
+Phase 7/7: Running 'FLUSH PRIVILEGES'
+OK
+SHOW CREATE TABLE mysql.user;
+View Create View character_set_client collation_connection
+user CREATE ALGORITHM=UNDEFINED DEFINER=`mariadb.sys`@`localhost` SQL SECURITY DEFINER VIEW `mysql`.`user` AS select `mysql`.`global_priv`.`Host` AS `Host`,`mysql`.`global_priv`.`User` AS `User`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.plugin') in ('mysql_native_password','mysql_old_password'),ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.authentication_string'),''),'') AS `Password`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 1,'Y','N') AS `Select_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 2,'Y','N') AS `Insert_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 4,'Y','N') AS `Update_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 8,'Y','N') AS `Delete_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 16,'Y','N') AS `Create_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 32,'Y','N') AS `Drop_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 64,'Y','N') AS `Reload_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 128,'Y','N') AS `Shutdown_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 256,'Y','N') AS `Process_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 512,'Y','N') AS `File_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 1024,'Y','N') AS `Grant_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 2048,'Y','N') AS `References_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 4096,'Y','N') AS `Index_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 8192,'Y','N') AS `Alter_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 16384,'Y','N') AS `Show_db_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 32768,'Y','N') AS `Super_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 65536,'Y','N') AS `Create_tmp_table_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 131072,'Y','N') AS `Lock_tables_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 262144,'Y','N') AS `Execute_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 524288,'Y','N') AS `Repl_slave_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 1048576,'Y','N') AS `Repl_client_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 2097152,'Y','N') AS `Create_view_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 4194304,'Y','N') AS `Show_view_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 8388608,'Y','N') AS `Create_routine_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 16777216,'Y','N') AS `Alter_routine_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 33554432,'Y','N') AS `Create_user_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 67108864,'Y','N') AS `Event_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 134217728,'Y','N') AS `Trigger_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 268435456,'Y','N') AS `Create_tablespace_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 536870912,'Y','N') AS `Delete_history_priv`,elt(ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.ssl_type'),0) + 1,'','ANY','X509','SPECIFIED') AS `ssl_type`,ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.ssl_cipher'),'') AS `ssl_cipher`,ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.x509_issuer'),'') AS `x509_issuer`,ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.x509_subject'),'') AS 
`x509_subject`,cast(ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.max_questions'),0) as unsigned) AS `max_questions`,cast(ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.max_updates'),0) as unsigned) AS `max_updates`,cast(ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.max_connections'),0) as unsigned) AS `max_connections`,cast(ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.max_user_connections'),0) as signed) AS `max_user_connections`,ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.plugin'),'') AS `plugin`,ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.authentication_string'),'') AS `authentication_string`,if(ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.password_last_changed'),1) = 0,'Y','N') AS `password_expired`,elt(ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.is_role'),0) + 1,'N','Y') AS `is_role`,ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.default_role'),'') AS `default_role`,cast(ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.max_statement_time'),0.0) as decimal(12,6)) AS `max_statement_time` from `mysql`.`global_priv` latin1 latin1_swedish_ci
+CREATE ROLE `aRole`;
+SET DEFAULT ROLE aRole;
+SHOW GRANTS;
+Grants for root@localhost
+GRANT `aRole` TO `root`@`localhost` WITH ADMIN OPTION
+GRANT ALL PRIVILEGES ON *.* TO `root`@`localhost` WITH GRANT OPTION
+GRANT PROXY ON ''@'%' TO 'root'@'localhost' WITH GRANT OPTION
+GRANT USAGE ON *.* TO `aRole`
+SET DEFAULT ROLE aRole FOR 'root'@'localhost'
+SET DEFAULT ROLE NONE;
+SHOW GRANTS;
+Grants for root@localhost
+GRANT `aRole` TO `root`@`localhost` WITH ADMIN OPTION
+GRANT ALL PRIVILEGES ON *.* TO `root`@`localhost` WITH GRANT OPTION
+GRANT PROXY ON ''@'%' TO 'root'@'localhost' WITH GRANT OPTION
+GRANT USAGE ON *.* TO `aRole`
DROP ROLE `aRole`;
FLUSH PRIVILEGES;
+drop table mysql.global_priv;
+rename table mysql.global_priv_bak to mysql.global_priv;
# End of 10.2 tests
#
# Ensure that mysql_upgrade correctly sets truncate_versioning_priv
diff --git a/mysql-test/main/mysql_upgrade.test b/mysql-test/main/mysql_upgrade.test
index 168104660e7..7d673d2bb4d 100644
--- a/mysql-test/main/mysql_upgrade.test
+++ b/mysql-test/main/mysql_upgrade.test
@@ -172,8 +172,6 @@ DROP PROCEDURE test.pr;
# MDEV-13274 mysql_upgrade fails if dbname+tablename+partioname > 64 chars
#
use test;
-call mtr.add_suppression('Column last_update in table `mysql`.`innodb_table_stats` is INT NOT NULL but should be');
-alter table mysql.innodb_table_stats modify last_update int not null;
create table extralongname_extralongname_extralongname_extralongname_ext (
id int(10) unsigned not null,
@@ -245,10 +243,42 @@ FLUSH PRIVILEGES;
SET ROLE `aRole`;
SELECT `User`, `is_role` FROM `mysql`.`user`;
+DROP ROLE aRole;
+
+--echo #
+--echo # MDEV-24122 Fix previously MySQL-5.7 data directories that upgraded prior to MDEV-23201
+--echo #
+
+--source include/switch_to_mysql_user.inc
+drop view mysql.user_bak;
+drop table mysql.user;
+truncate table mysql.tables_priv;
+--copy_file std_data/mysql57user.frm $MYSQLD_DATADIR/mysql/user.frm
+--copy_file std_data/mysql57user.MYI $MYSQLD_DATADIR/mysql/user.MYI
+--copy_file std_data/mysql57user.MYD $MYSQLD_DATADIR/mysql/user.MYD
+FLUSH TABLES mysql.user;
+
+# What an upgrade prior to MDEV-23201 would have done:
+ALTER TABLE mysql.user ADD is_role enum('N', 'Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL;
+ALTER TABLE mysql.user ADD default_role char(80) binary DEFAULT '' NOT NULL;
+ALTER TABLE mysql.user ADD max_statement_time decimal(12,6) DEFAULT 0 NOT NULL;
+FLUSH PRIVILEGES;
+
+--exec $MYSQL_UPGRADE --force 2>&1
+SHOW CREATE TABLE mysql.user;
+
+CREATE ROLE `aRole`;
+SET DEFAULT ROLE aRole;
+SHOW GRANTS;
+SET DEFAULT ROLE NONE;
+SHOW GRANTS;
+
DROP ROLE `aRole`;
--exec $MYSQL mysql < $MYSQLTEST_VARDIR/tmp/user.sql
FLUSH PRIVILEGES;
--remove_file $MYSQLD_DATADIR/mysql_upgrade_info
+drop table mysql.global_priv;
+rename table mysql.global_priv_bak to mysql.global_priv;
--echo # End of 10.2 tests
diff --git a/mysql-test/main/mysqld--help.test b/mysql-test/main/mysqld--help.test
index 6efd088cbdd..6fff3f51006 100644
--- a/mysql-test/main/mysqld--help.test
+++ b/mysql-test/main/mysqld--help.test
@@ -2,6 +2,8 @@
# mysqld --help
#
--source include/not_embedded.inc
+--source include/not_asan.inc
+--source include/not_ubsan.inc
--source include/have_perfschema.inc
--source include/have_profiling.inc
--source include/platform.inc
diff --git a/mysql-test/main/mysqldump-nl.test b/mysql-test/main/mysqldump-nl.test
index 863c846b9a6..4451b0605c2 100644
--- a/mysql-test/main/mysqldump-nl.test
+++ b/mysql-test/main/mysqldump-nl.test
@@ -26,10 +26,10 @@ create procedure sp() select * from `v1
flush tables;
use test;
-exec $MYSQL_DUMP --compact --comment --routines --add-drop-database --databases 'mysqltest1
+exec $MYSQL_DUMP --compact --comments --routines --add-drop-database --databases 'mysqltest1
1tsetlqsym';
-exec $MYSQL_DUMP --compact --comment --routines --add-drop-database --databases 'mysqltest1
+exec $MYSQL_DUMP --compact --comments --routines --add-drop-database --databases 'mysqltest1
1tsetlqsym' | $MYSQL;
show tables from `mysqltest1
@@ -45,11 +45,11 @@ create database `test\``
show databases like 'test%';
-exec $MYSQL_DUMP --compact --comment --add-drop-database --databases 'test`' 'test\`
+exec $MYSQL_DUMP --compact --comments --add-drop-database --databases 'test`' 'test\`
\! ls
#';
-exec $MYSQL_DUMP --compact --comment --add-drop-database --databases 'test`' 'test\`
+exec $MYSQL_DUMP --compact --comments --add-drop-database --databases 'test`' 'test\`
\! ls
#' | $MYSQL;
diff --git a/mysql-test/main/mysqldump-system.result b/mysql-test/main/mysqldump-system.result
index d887df81d2b..c443e250e4a 100644
--- a/mysql-test/main/mysqldump-system.result
+++ b/mysql-test/main/mysqldump-system.result
@@ -90,31 +90,31 @@ USE mysql;
LOCK TABLES `column_stats` WRITE;
/*!40000 ALTER TABLE `column_stats` DISABLE KEYS */;
-INSERT INTO `column_stats` VALUES ('mysql','tz','Time_zone_id','1','5',0.0000,4.0000,98.2500,254,'DOUBLE_PREC_HB','\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0ÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿');
+REPLACE INTO `column_stats` VALUES ('mysql','tz','Time_zone_id','1','5',0.0000,4.0000,98.2500,254,'DOUBLE_PREC_HB','\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0ÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿');
/*!40000 ALTER TABLE `column_stats` ENABLE KEYS */;
UNLOCK TABLES;
LOCK TABLES `index_stats` WRITE;
/*!40000 ALTER TABLE `index_stats` DISABLE KEYS */;
-INSERT INTO `index_stats` VALUES ('mysql','tz','PRIMARY',1,98.2500);
+REPLACE INTO `index_stats` VALUES ('mysql','tz','PRIMARY',1,98.2500);
/*!40000 ALTER TABLE `index_stats` ENABLE KEYS */;
UNLOCK TABLES;
LOCK TABLES `table_stats` WRITE;
/*!40000 ALTER TABLE `table_stats` DISABLE KEYS */;
-INSERT INTO `table_stats` VALUES ('mysql','tz',393);
+REPLACE INTO `table_stats` VALUES ('mysql','tz',393);
/*!40000 ALTER TABLE `table_stats` ENABLE KEYS */;
UNLOCK TABLES;
LOCK TABLES `innodb_index_stats` WRITE;
/*!40000 ALTER TABLE `innodb_index_stats` DISABLE KEYS */;
-INSERT INTO `innodb_index_stats` VALUES ('mysql','tz','PRIMARY','2019-12-31 21:00:00','n_diff_pfx01',4,1,'Time_zone_id'),('mysql','tz','PRIMARY','2019-12-31 21:00:00','n_diff_pfx02',393,1,'Time_zone_id,Transition_time'),('mysql','tz','PRIMARY','2019-12-31 21:00:00','n_leaf_pages',1,NULL,'Number of leaf pages in the index'),('mysql','tz','PRIMARY','2019-12-31 21:00:00','size',1,NULL,'Number of pages in the index');
+REPLACE INTO `innodb_index_stats` VALUES ('mysql','tz','PRIMARY','2019-12-31 21:00:00','n_diff_pfx01',4,1,'Time_zone_id'),('mysql','tz','PRIMARY','2019-12-31 21:00:00','n_diff_pfx02',393,1,'Time_zone_id,Transition_time'),('mysql','tz','PRIMARY','2019-12-31 21:00:00','n_leaf_pages',1,NULL,'Number of leaf pages in the index'),('mysql','tz','PRIMARY','2019-12-31 21:00:00','size',1,NULL,'Number of pages in the index');
/*!40000 ALTER TABLE `innodb_index_stats` ENABLE KEYS */;
UNLOCK TABLES;
LOCK TABLES `innodb_table_stats` WRITE;
/*!40000 ALTER TABLE `innodb_table_stats` DISABLE KEYS */;
-INSERT INTO `innodb_table_stats` VALUES ('mysql','tz','2019-12-31 21:00:00',393,1,0);
+REPLACE INTO `innodb_table_stats` VALUES ('mysql','tz','2019-12-31 21:00:00',393,1,0);
/*!40000 ALTER TABLE `innodb_table_stats` ENABLE KEYS */;
UNLOCK TABLES;
@@ -122,31 +122,31 @@ USE mysql;
LOCK TABLES `time_zone` WRITE;
/*!40000 ALTER TABLE `time_zone` DISABLE KEYS */;
-INSERT INTO `time_zone` VALUES (1,'N'),(2,'N'),(3,'N'),(4,'Y'),(5,'N');
+REPLACE INTO `time_zone` VALUES (1,'N'),(2,'N'),(3,'N'),(4,'Y'),(5,'N');
/*!40000 ALTER TABLE `time_zone` ENABLE KEYS */;
UNLOCK TABLES;
LOCK TABLES `time_zone_name` WRITE;
/*!40000 ALTER TABLE `time_zone_name` DISABLE KEYS */;
-INSERT INTO `time_zone_name` VALUES ('Europe/Moscow',3),('Japan',5),('leap/Europe/Moscow',4),('MET',1),('Universal',2),('UTC',2);
+REPLACE INTO `time_zone_name` VALUES ('Europe/Moscow',3),('Japan',5),('leap/Europe/Moscow',4),('MET',1),('Universal',2),('UTC',2);
/*!40000 ALTER TABLE `time_zone_name` ENABLE KEYS */;
UNLOCK TABLES;
LOCK TABLES `time_zone_leap_second` WRITE;
/*!40000 ALTER TABLE `time_zone_leap_second` DISABLE KEYS */;
-INSERT INTO `time_zone_leap_second` VALUES (78796800,1),(94694401,2),(126230402,3),(157766403,4),(189302404,5),(220924805,6),(252460806,7),(283996807,8),(315532808,9),(362793609,10),(394329610,11),(425865611,12),(489024012,13),(567993613,14),(631152014,15),(662688015,16),(709948816,17),(741484817,18),(773020818,19),(820454419,20),(867715220,21),(915148821,22);
+REPLACE INTO `time_zone_leap_second` VALUES (78796800,1),(94694401,2),(126230402,3),(157766403,4),(189302404,5),(220924805,6),(252460806,7),(283996807,8),(315532808,9),(362793609,10),(394329610,11),(425865611,12),(489024012,13),(567993613,14),(631152014,15),(662688015,16),(709948816,17),(741484817,18),(773020818,19),(820454419,20),(867715220,21),(915148821,22);
/*!40000 ALTER TABLE `time_zone_leap_second` ENABLE KEYS */;
UNLOCK TABLES;
LOCK TABLES `time_zone_transition` WRITE;
/*!40000 ALTER TABLE `time_zone_transition` DISABLE KEYS */;
-INSERT INTO `time_zone_transition` VALUES (1,-1693706400,0),(1,-1680483600,1),(1,-1663455600,2),(1,-1650150000,3),(1,-1632006000,2),(1,-1618700400,3),(1,-938905200,2),(1,-857257200,3),(1,-844556400,2),(1,-828226800,3),(1,-812502000,2),(1,-796777200,3),(1,228877200,2),(1,243997200,3),(1,260326800,2),(1,276051600,3),(1,291776400,2),(1,307501200,3),(1,323830800,2),(1,338950800,3),(1,354675600,2),(1,370400400,3),(1,386125200,2),(1,401850000,3),(1,417574800,2),(1,433299600,3),(1,449024400,2),(1,465354000,3),(1,481078800,2),(1,496803600,3),(1,512528400,2),(1,528253200,3),(1,543978000,2),(1,559702800,3),(1,575427600,2),(1,591152400,3),(1,606877200,2),(1,622602000,3),(1,638326800,2),(1,654656400,3),(1,670381200,2),(1,686106000,3),(1,701830800,2),(1,717555600,3),(1,733280400,2),(1,749005200,3),(1,764730000,2),(1,780454800,3),(1,796179600,2),(1,811904400,3),(1,828234000,2),(1,846378000,3),(1,859683600,2),(1,877827600,3),(1,891133200,2),(1,909277200,3),(1,922582800,2),(1,941331600,3),(1,954032400,2),(1,972781200,3),(1,985482000,2),(1,1004230800,3),(1,1017536400,2),(1,1035680400,3),(1,1048986000,2),(1,1067130000,3),(1,1080435600,2),(1,1099184400,3),(1,1111885200,2),(1,1130634000,3),(1,1143334800,2),(1,1162083600,3),(1,1174784400,2),(1,1193533200,3),(1,1206838800,2),(1,1224982800,3),(1,1238288400,2),(1,1256432400,3),(1,1269738000,2),(1,1288486800,3),(1,1301187600,2),(1,1319936400,3),(1,1332637200,2),(1,1351386000,3),(1,1364691600,2),(1,1382835600,3),(1,1396141200,2),(1,1414285200,3),(1,1427590800,2),(1,1445734800,3),(1,1459040400,2),(1,1477789200,3),(1,1490490000,2),(1,1509238800,3),(1,1521939600,2),(1,1540688400,3),(1,1553994000,2),(1,1572138000,3),(1,1585443600,2),(1,1603587600,3),(1,1616893200,2),(1,1635642000,3),(1,1648342800,2),(1,1667091600,3),(1,1679792400,2),(1,1698541200,3),(1,1711846800,2),(1,1729990800,3),(1,1743296400,2),(1,1761440400,3),(1,1774746000,2),(1,1792890000,3),(1,1806195600,2),(1,1824944400,3),(1,1837645200,2),(1,1856394000,3),(1,1869094800,2),(1,1887843600,3),(1,1901149200,2),(1,1919293200,3),(1,1932598800,2),(1,1950742800,3),(1,1964048400,2),(1,1982797200,3),(1,1995498000,2),(1,2014246800,3),(1,2026947600,2),(1,2045696400,3),(1,2058397200,2),(1,2077146000,3),(1,2090451600,2),(1,2108595600,3),(1,2121901200,2),(1,2140045200,3),(3,-1688265000,2),(3,-1656819048,1),(3,-1641353448,2),(3,-1627965048,3),(3,-1618716648,1),(3,-1596429048,3),(3,-1593829848,5),(3,-1589860800,4),(3,-1542427200,5),(3,-1539493200,6),(3,-1525323600,5),(3,-1522728000,4),(3,-1491188400,7),(3,-1247536800,4),(3,354920400,5),(3,370728000,4),(3,386456400,5),(3,402264000,4),(3,417992400,5),(3,433800000,4),(3,449614800,5),(3,465346800,8),(3,481071600,9),(3,496796400,8),(3,512521200,9),(3,528246000,8),(3,543970800,9),(3,559695600,8),(3,575420400,9),(3,591145200,8),(3,606870000,9),(3,622594800,8),(3,638319600,9),(3,654649200,8),(3,670374000,10),(3,686102400,11),(3,695779200,8),(3,701812800,5),(3,717534000,4),(3,733273200,9),(3,748998000,8),(3,764722800,9),(3,780447600,8),(3,796172400,9),(3,811897200,8),(3,828226800,9),(3,846370800,8),(3,859676400,9),(3,877820400,8),(3,891126000,9),(3,909270000,8),(3,922575600,9),(3,941324400,8),(3,954025200,9),(3,972774000,8),(3,985474800,9),(3,1004223600,8),(3,1017529200,9),(3,1035673200,8),(3,1048978800,9),(3,1067122800,8),(3,1080428400,9),(3,1099177200,8),(3,1111878000,9),(3,1130626800,8),(3,1143327600,9),(3,1162076400,8),(3,1174777200,9),(3,1193526000,8),(3,1206831600,9),(3,1224975600,8),(3,1238281200,9),(3,1256425200,8),(3,1269730800,9),(3,1288479600,8),(3,1301180400,9),(3,13199292
00,8),(3,1332630000,9),(3,1351378800,8),(3,1364684400,9),(3,1382828400,8),(3,1396134000,9),(3,1414278000,8),(3,1427583600,9),(3,1445727600,8),(3,1459033200,9),(3,1477782000,8),(3,1490482800,9),(3,1509231600,8),(3,1521932400,9),(3,1540681200,8),(3,1553986800,9),(3,1572130800,8),(3,1585436400,9),(3,1603580400,8),(3,1616886000,9),(3,1635634800,8),(3,1648335600,9),(3,1667084400,8),(3,1679785200,9),(3,1698534000,8),(3,1711839600,9),(3,1729983600,8),(3,1743289200,9),(3,1761433200,8),(3,1774738800,9),(3,1792882800,8),(3,1806188400,9),(3,1824937200,8),(3,1837638000,9),(3,1856386800,8),(3,1869087600,9),(3,1887836400,8),(3,1901142000,9),(3,1919286000,8),(3,1932591600,9),(3,1950735600,8),(3,1964041200,9),(3,1982790000,8),(3,1995490800,9),(3,2014239600,8),(3,2026940400,9),(3,2045689200,8),(3,2058390000,9),(3,2077138800,8),(3,2090444400,9),(3,2108588400,8),(3,2121894000,9),(3,2140038000,8),(4,-1688265000,2),(4,-1656819048,1),(4,-1641353448,2),(4,-1627965048,3),(4,-1618716648,1),(4,-1596429048,3),(4,-1593829848,5),(4,-1589860800,4),(4,-1542427200,5),(4,-1539493200,6),(4,-1525323600,5),(4,-1522728000,4),(4,-1491188400,7),(4,-1247536800,4),(4,354920409,5),(4,370728010,4),(4,386456410,5),(4,402264011,4),(4,417992411,5),(4,433800012,4),(4,449614812,5),(4,465346812,8),(4,481071612,9),(4,496796413,8),(4,512521213,9),(4,528246013,8),(4,543970813,9),(4,559695613,8),(4,575420414,9),(4,591145214,8),(4,606870014,9),(4,622594814,8),(4,638319615,9),(4,654649215,8),(4,670374016,10),(4,686102416,11),(4,695779216,8),(4,701812816,5),(4,717534017,4),(4,733273217,9),(4,748998018,8),(4,764722818,9),(4,780447619,8),(4,796172419,9),(4,811897219,8),(4,828226820,9),(4,846370820,8),(4,859676420,9),(4,877820421,8),(4,891126021,9),(4,909270021,8),(4,922575622,9),(4,941324422,8),(4,954025222,9),(4,972774022,8),(4,985474822,9),(4,1004223622,8),(4,1017529222,9),(4,1035673222,8),(4,1048978822,9),(4,1067122822,8),(4,1080428422,9),(4,1099177222,8),(4,1111878022,9),(4,1130626822,8),(4,1143327622,9),(4,1162076422,8),(4,1174777222,9),(4,1193526022,8),(4,1206831622,9),(4,1224975622,8),(4,1238281222,9),(4,1256425222,8),(4,1269730822,9),(4,1288479622,8),(4,1301180422,9),(4,1319929222,8),(4,1332630022,9),(4,1351378822,8),(4,1364684422,9),(4,1382828422,8),(4,1396134022,9),(4,1414278022,8),(4,1427583622,9),(4,1445727622,8),(4,1459033222,9),(4,1477782022,8),(4,1490482822,9),(4,1509231622,8),(4,1521932422,9),(4,1540681222,8),(4,1553986822,9),(4,1572130822,8),(4,1585436422,9),(4,1603580422,8),(4,1616886022,9),(4,1635634822,8),(4,1648335622,9),(4,1667084422,8),(4,1679785222,9),(4,1698534022,8),(4,1711839622,9),(4,1729983622,8),(4,1743289222,9),(4,1761433222,8),(4,1774738822,9),(4,1792882822,8),(4,1806188422,9),(4,1824937222,8),(4,1837638022,9),(4,1856386822,8),(4,1869087622,9),(4,1887836422,8),(4,1901142022,9),(4,1919286022,8),(4,1932591622,9),(4,1950735622,8),(4,1964041222,9),(4,1982790022,8),(4,1995490822,9),(4,2014239622,8),(4,2026940422,9),(4,2045689222,8),(4,2058390022,9),(4,2077138822,8),(4,2090444422,9),(4,2108588422,8),(4,2121894022,9),(4,2140038022,8),(5,-1009875600,1);
+REPLACE INTO `time_zone_transition` VALUES (1,-1693706400,0),(1,-1680483600,1),(1,-1663455600,2),(1,-1650150000,3),(1,-1632006000,2),(1,-1618700400,3),(1,-938905200,2),(1,-857257200,3),(1,-844556400,2),(1,-828226800,3),(1,-812502000,2),(1,-796777200,3),(1,228877200,2),(1,243997200,3),(1,260326800,2),(1,276051600,3),(1,291776400,2),(1,307501200,3),(1,323830800,2),(1,338950800,3),(1,354675600,2),(1,370400400,3),(1,386125200,2),(1,401850000,3),(1,417574800,2),(1,433299600,3),(1,449024400,2),(1,465354000,3),(1,481078800,2),(1,496803600,3),(1,512528400,2),(1,528253200,3),(1,543978000,2),(1,559702800,3),(1,575427600,2),(1,591152400,3),(1,606877200,2),(1,622602000,3),(1,638326800,2),(1,654656400,3),(1,670381200,2),(1,686106000,3),(1,701830800,2),(1,717555600,3),(1,733280400,2),(1,749005200,3),(1,764730000,2),(1,780454800,3),(1,796179600,2),(1,811904400,3),(1,828234000,2),(1,846378000,3),(1,859683600,2),(1,877827600,3),(1,891133200,2),(1,909277200,3),(1,922582800,2),(1,941331600,3),(1,954032400,2),(1,972781200,3),(1,985482000,2),(1,1004230800,3),(1,1017536400,2),(1,1035680400,3),(1,1048986000,2),(1,1067130000,3),(1,1080435600,2),(1,1099184400,3),(1,1111885200,2),(1,1130634000,3),(1,1143334800,2),(1,1162083600,3),(1,1174784400,2),(1,1193533200,3),(1,1206838800,2),(1,1224982800,3),(1,1238288400,2),(1,1256432400,3),(1,1269738000,2),(1,1288486800,3),(1,1301187600,2),(1,1319936400,3),(1,1332637200,2),(1,1351386000,3),(1,1364691600,2),(1,1382835600,3),(1,1396141200,2),(1,1414285200,3),(1,1427590800,2),(1,1445734800,3),(1,1459040400,2),(1,1477789200,3),(1,1490490000,2),(1,1509238800,3),(1,1521939600,2),(1,1540688400,3),(1,1553994000,2),(1,1572138000,3),(1,1585443600,2),(1,1603587600,3),(1,1616893200,2),(1,1635642000,3),(1,1648342800,2),(1,1667091600,3),(1,1679792400,2),(1,1698541200,3),(1,1711846800,2),(1,1729990800,3),(1,1743296400,2),(1,1761440400,3),(1,1774746000,2),(1,1792890000,3),(1,1806195600,2),(1,1824944400,3),(1,1837645200,2),(1,1856394000,3),(1,1869094800,2),(1,1887843600,3),(1,1901149200,2),(1,1919293200,3),(1,1932598800,2),(1,1950742800,3),(1,1964048400,2),(1,1982797200,3),(1,1995498000,2),(1,2014246800,3),(1,2026947600,2),(1,2045696400,3),(1,2058397200,2),(1,2077146000,3),(1,2090451600,2),(1,2108595600,3),(1,2121901200,2),(1,2140045200,3),(3,-1688265000,2),(3,-1656819048,1),(3,-1641353448,2),(3,-1627965048,3),(3,-1618716648,1),(3,-1596429048,3),(3,-1593829848,5),(3,-1589860800,4),(3,-1542427200,5),(3,-1539493200,6),(3,-1525323600,5),(3,-1522728000,4),(3,-1491188400,7),(3,-1247536800,4),(3,354920400,5),(3,370728000,4),(3,386456400,5),(3,402264000,4),(3,417992400,5),(3,433800000,4),(3,449614800,5),(3,465346800,8),(3,481071600,9),(3,496796400,8),(3,512521200,9),(3,528246000,8),(3,543970800,9),(3,559695600,8),(3,575420400,9),(3,591145200,8),(3,606870000,9),(3,622594800,8),(3,638319600,9),(3,654649200,8),(3,670374000,10),(3,686102400,11),(3,695779200,8),(3,701812800,5),(3,717534000,4),(3,733273200,9),(3,748998000,8),(3,764722800,9),(3,780447600,8),(3,796172400,9),(3,811897200,8),(3,828226800,9),(3,846370800,8),(3,859676400,9),(3,877820400,8),(3,891126000,9),(3,909270000,8),(3,922575600,9),(3,941324400,8),(3,954025200,9),(3,972774000,8),(3,985474800,9),(3,1004223600,8),(3,1017529200,9),(3,1035673200,8),(3,1048978800,9),(3,1067122800,8),(3,1080428400,9),(3,1099177200,8),(3,1111878000,9),(3,1130626800,8),(3,1143327600,9),(3,1162076400,8),(3,1174777200,9),(3,1193526000,8),(3,1206831600,9),(3,1224975600,8),(3,1238281200,9),(3,1256425200,8),(3,1269730800,9),(3,1288479600,8),(3,1301180400,9),(3,1319929
200,8),(3,1332630000,9),(3,1351378800,8),(3,1364684400,9),(3,1382828400,8),(3,1396134000,9),(3,1414278000,8),(3,1427583600,9),(3,1445727600,8),(3,1459033200,9),(3,1477782000,8),(3,1490482800,9),(3,1509231600,8),(3,1521932400,9),(3,1540681200,8),(3,1553986800,9),(3,1572130800,8),(3,1585436400,9),(3,1603580400,8),(3,1616886000,9),(3,1635634800,8),(3,1648335600,9),(3,1667084400,8),(3,1679785200,9),(3,1698534000,8),(3,1711839600,9),(3,1729983600,8),(3,1743289200,9),(3,1761433200,8),(3,1774738800,9),(3,1792882800,8),(3,1806188400,9),(3,1824937200,8),(3,1837638000,9),(3,1856386800,8),(3,1869087600,9),(3,1887836400,8),(3,1901142000,9),(3,1919286000,8),(3,1932591600,9),(3,1950735600,8),(3,1964041200,9),(3,1982790000,8),(3,1995490800,9),(3,2014239600,8),(3,2026940400,9),(3,2045689200,8),(3,2058390000,9),(3,2077138800,8),(3,2090444400,9),(3,2108588400,8),(3,2121894000,9),(3,2140038000,8),(4,-1688265000,2),(4,-1656819048,1),(4,-1641353448,2),(4,-1627965048,3),(4,-1618716648,1),(4,-1596429048,3),(4,-1593829848,5),(4,-1589860800,4),(4,-1542427200,5),(4,-1539493200,6),(4,-1525323600,5),(4,-1522728000,4),(4,-1491188400,7),(4,-1247536800,4),(4,354920409,5),(4,370728010,4),(4,386456410,5),(4,402264011,4),(4,417992411,5),(4,433800012,4),(4,449614812,5),(4,465346812,8),(4,481071612,9),(4,496796413,8),(4,512521213,9),(4,528246013,8),(4,543970813,9),(4,559695613,8),(4,575420414,9),(4,591145214,8),(4,606870014,9),(4,622594814,8),(4,638319615,9),(4,654649215,8),(4,670374016,10),(4,686102416,11),(4,695779216,8),(4,701812816,5),(4,717534017,4),(4,733273217,9),(4,748998018,8),(4,764722818,9),(4,780447619,8),(4,796172419,9),(4,811897219,8),(4,828226820,9),(4,846370820,8),(4,859676420,9),(4,877820421,8),(4,891126021,9),(4,909270021,8),(4,922575622,9),(4,941324422,8),(4,954025222,9),(4,972774022,8),(4,985474822,9),(4,1004223622,8),(4,1017529222,9),(4,1035673222,8),(4,1048978822,9),(4,1067122822,8),(4,1080428422,9),(4,1099177222,8),(4,1111878022,9),(4,1130626822,8),(4,1143327622,9),(4,1162076422,8),(4,1174777222,9),(4,1193526022,8),(4,1206831622,9),(4,1224975622,8),(4,1238281222,9),(4,1256425222,8),(4,1269730822,9),(4,1288479622,8),(4,1301180422,9),(4,1319929222,8),(4,1332630022,9),(4,1351378822,8),(4,1364684422,9),(4,1382828422,8),(4,1396134022,9),(4,1414278022,8),(4,1427583622,9),(4,1445727622,8),(4,1459033222,9),(4,1477782022,8),(4,1490482822,9),(4,1509231622,8),(4,1521932422,9),(4,1540681222,8),(4,1553986822,9),(4,1572130822,8),(4,1585436422,9),(4,1603580422,8),(4,1616886022,9),(4,1635634822,8),(4,1648335622,9),(4,1667084422,8),(4,1679785222,9),(4,1698534022,8),(4,1711839622,9),(4,1729983622,8),(4,1743289222,9),(4,1761433222,8),(4,1774738822,9),(4,1792882822,8),(4,1806188422,9),(4,1824937222,8),(4,1837638022,9),(4,1856386822,8),(4,1869087622,9),(4,1887836422,8),(4,1901142022,9),(4,1919286022,8),(4,1932591622,9),(4,1950735622,8),(4,1964041222,9),(4,1982790022,8),(4,1995490822,9),(4,2014239622,8),(4,2026940422,9),(4,2045689222,8),(4,2058390022,9),(4,2077138822,8),(4,2090444422,9),(4,2108588422,8),(4,2121894022,9),(4,2140038022,8),(5,-1009875600,1);
/*!40000 ALTER TABLE `time_zone_transition` ENABLE KEYS */;
UNLOCK TABLES;
LOCK TABLES `time_zone_transition_type` WRITE;
/*!40000 ALTER TABLE `time_zone_transition_type` DISABLE KEYS */;
-INSERT INTO `time_zone_transition_type` VALUES (1,0,7200,1,'MEST'),(1,1,3600,0,'MET'),(1,2,7200,1,'MEST'),(1,3,3600,0,'MET'),(2,0,0,0,'UTC'),(3,0,9000,0,'MMT'),(3,1,12648,1,'MST'),(3,2,9048,0,'MMT'),(3,3,16248,1,'MDST'),(3,4,10800,0,'MSK'),(3,5,14400,1,'MSD'),(3,6,18000,1,'MSD'),(3,7,7200,0,'EET'),(3,8,10800,0,'MSK'),(3,9,14400,1,'MSD'),(3,10,10800,1,'EEST'),(3,11,7200,0,'EET'),(4,0,9000,0,'MMT'),(4,1,12648,1,'MST'),(4,2,9048,0,'MMT'),(4,3,16248,1,'MDST'),(4,4,10800,0,'MSK'),(4,5,14400,1,'MSD'),(4,6,18000,1,'MSD'),(4,7,7200,0,'EET'),(4,8,10800,0,'MSK'),(4,9,14400,1,'MSD'),(4,10,10800,1,'EEST'),(4,11,7200,0,'EET'),(5,0,32400,0,'CJT'),(5,1,32400,0,'JST');
+REPLACE INTO `time_zone_transition_type` VALUES (1,0,7200,1,'MEST'),(1,1,3600,0,'MET'),(1,2,7200,1,'MEST'),(1,3,3600,0,'MET'),(2,0,0,0,'UTC'),(3,0,9000,0,'MMT'),(3,1,12648,1,'MST'),(3,2,9048,0,'MMT'),(3,3,16248,1,'MDST'),(3,4,10800,0,'MSK'),(3,5,14400,1,'MSD'),(3,6,18000,1,'MSD'),(3,7,7200,0,'EET'),(3,8,10800,0,'MSK'),(3,9,14400,1,'MSD'),(3,10,10800,1,'EEST'),(3,11,7200,0,'EET'),(4,0,9000,0,'MMT'),(4,1,12648,1,'MST'),(4,2,9048,0,'MMT'),(4,3,16248,1,'MDST'),(4,4,10800,0,'MSK'),(4,5,14400,1,'MSD'),(4,6,18000,1,'MSD'),(4,7,7200,0,'EET'),(4,8,10800,0,'MSK'),(4,9,14400,1,'MSD'),(4,10,10800,1,'EEST'),(4,11,7200,0,'EET'),(5,0,32400,0,'CJT'),(5,1,32400,0,'JST');
/*!40000 ALTER TABLE `time_zone_transition_type` ENABLE KEYS */;
UNLOCK TABLES;
/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
diff --git a/mysql-test/main/mysqldump.result b/mysql-test/main/mysqldump.result
index 19ffca17b1e..d0d054b5473 100644
--- a/mysql-test/main/mysqldump.result
+++ b/mysql-test/main/mysqldump.result
@@ -1,14 +1,10 @@
call mtr.add_suppression("@003f.frm' \\(errno: 22\\)");
+call mtr.add_suppression("Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT");
# Bug#37938 Test "mysqldump" lacks various insert statements
# Turn off concurrent inserts to avoid random errors
# NOTE: We reset the variable back to saved value at the end of test
SET @OLD_CONCURRENT_INSERT = @@GLOBAL.CONCURRENT_INSERT;
SET @@GLOBAL.CONCURRENT_INSERT = 0;
-DROP TABLE IF EXISTS t1, `"t"1`, t1aa, t2, t2aa, t3;
-drop database if exists mysqldump_test_db;
-drop database if exists db1;
-drop database if exists db2;
-drop view if exists v1, v2, v3;
CREATE TABLE t1(a INT, KEY (a)) KEY_BLOCK_SIZE=1024;
INSERT INTO t1 VALUES (1), (2);
<?xml version="1.0"?>
@@ -5726,6 +5722,315 @@ DELIMITER ;
/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
DROP TABLE t1;
+#
+# MDEV-20939: Race condition between mysqldump import and InnoDB
+# persistent statistics calculation
+#
+#
+# Without --replace and --insert-ignore
+#
+
+/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
+/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
+/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
+/*!40101 SET NAMES utf8mb4 */;
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+DROP TABLE IF EXISTS `innodb_index_stats`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `innodb_index_stats` (
+ `database_name` varchar(64) COLLATE utf8_bin NOT NULL,
+ `table_name` varchar(199) COLLATE utf8_bin NOT NULL,
+ `index_name` varchar(64) COLLATE utf8_bin NOT NULL,
+ `last_update` timestamp NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(),
+ `stat_name` varchar(64) COLLATE utf8_bin NOT NULL,
+ `stat_value` bigint(20) unsigned NOT NULL,
+ `sample_size` bigint(20) unsigned DEFAULT NULL,
+ `stat_description` varchar(1024) COLLATE utf8_bin NOT NULL,
+ PRIMARY KEY (`database_name`,`table_name`,`index_name`,`stat_name`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin STATS_PERSISTENT=0;
+/*!40101 SET character_set_client = @saved_cs_client */;
+DROP TABLE IF EXISTS `innodb_table_stats`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `innodb_table_stats` (
+ `database_name` varchar(64) COLLATE utf8_bin NOT NULL,
+ `table_name` varchar(199) COLLATE utf8_bin NOT NULL,
+ `last_update` timestamp NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(),
+ `n_rows` bigint(20) unsigned NOT NULL,
+ `clustered_index_size` bigint(20) unsigned NOT NULL,
+ `sum_of_other_index_sizes` bigint(20) unsigned NOT NULL,
+ PRIMARY KEY (`database_name`,`table_name`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin STATS_PERSISTENT=0;
+/*!40101 SET character_set_client = @saved_cs_client */;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `general_log` (
+ `event_time` timestamp(6) NOT NULL DEFAULT current_timestamp(6) ON UPDATE current_timestamp(6),
+ `user_host` mediumtext NOT NULL,
+ `thread_id` bigint(21) unsigned NOT NULL,
+ `server_id` int(10) unsigned NOT NULL,
+ `command_type` varchar(64) NOT NULL,
+ `argument` mediumtext NOT NULL
+) ENGINE=CSV DEFAULT CHARSET=utf8 COMMENT='General log';
+/*!40101 SET character_set_client = @saved_cs_client */;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `slow_log` (
+ `start_time` timestamp(6) NOT NULL DEFAULT current_timestamp(6) ON UPDATE current_timestamp(6),
+ `user_host` mediumtext NOT NULL,
+ `query_time` time(6) NOT NULL,
+ `lock_time` time(6) NOT NULL,
+ `rows_sent` int(11) NOT NULL,
+ `rows_examined` int(11) NOT NULL,
+ `db` varchar(512) NOT NULL,
+ `last_insert_id` int(11) NOT NULL,
+ `insert_id` int(11) NOT NULL,
+ `server_id` int(10) unsigned NOT NULL,
+ `sql_text` mediumtext NOT NULL,
+ `thread_id` bigint(21) unsigned NOT NULL,
+ `rows_affected` int(11) NOT NULL
+) ENGINE=CSV DEFAULT CHARSET=utf8 COMMENT='Slow log';
+/*!40101 SET character_set_client = @saved_cs_client */;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `transaction_registry` (
+ `transaction_id` bigint(20) unsigned NOT NULL,
+ `commit_id` bigint(20) unsigned NOT NULL,
+ `begin_timestamp` timestamp(6) NOT NULL DEFAULT '0000-00-00 00:00:00.000000',
+ `commit_timestamp` timestamp(6) NOT NULL DEFAULT '0000-00-00 00:00:00.000000',
+ `isolation_level` enum('READ-UNCOMMITTED','READ-COMMITTED','REPEATABLE-READ','SERIALIZABLE') COLLATE utf8_bin NOT NULL,
+ PRIMARY KEY (`transaction_id`),
+ UNIQUE KEY `commit_id` (`commit_id`),
+ KEY `begin_timestamp` (`begin_timestamp`),
+ KEY `commit_timestamp` (`commit_timestamp`,`transaction_id`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin STATS_PERSISTENT=0;
+/*!40101 SET character_set_client = @saved_cs_client */;
+/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
+/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
+/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
+#
+# With --replace
+#
+
+/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
+/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
+/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
+/*!40101 SET NAMES utf8mb4 */;
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+DROP TABLE IF EXISTS `innodb_index_stats`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `innodb_index_stats` (
+ `database_name` varchar(64) COLLATE utf8_bin NOT NULL,
+ `table_name` varchar(199) COLLATE utf8_bin NOT NULL,
+ `index_name` varchar(64) COLLATE utf8_bin NOT NULL,
+ `last_update` timestamp NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(),
+ `stat_name` varchar(64) COLLATE utf8_bin NOT NULL,
+ `stat_value` bigint(20) unsigned NOT NULL,
+ `sample_size` bigint(20) unsigned DEFAULT NULL,
+ `stat_description` varchar(1024) COLLATE utf8_bin NOT NULL,
+ PRIMARY KEY (`database_name`,`table_name`,`index_name`,`stat_name`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin STATS_PERSISTENT=0;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+LOCK TABLES `innodb_index_stats` WRITE;
+/*!40000 ALTER TABLE `innodb_index_stats` DISABLE KEYS */;
+/*!40000 ALTER TABLE `innodb_index_stats` ENABLE KEYS */;
+UNLOCK TABLES;
+DROP TABLE IF EXISTS `innodb_table_stats`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `innodb_table_stats` (
+ `database_name` varchar(64) COLLATE utf8_bin NOT NULL,
+ `table_name` varchar(199) COLLATE utf8_bin NOT NULL,
+ `last_update` timestamp NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(),
+ `n_rows` bigint(20) unsigned NOT NULL,
+ `clustered_index_size` bigint(20) unsigned NOT NULL,
+ `sum_of_other_index_sizes` bigint(20) unsigned NOT NULL,
+ PRIMARY KEY (`database_name`,`table_name`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin STATS_PERSISTENT=0;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+LOCK TABLES `innodb_table_stats` WRITE;
+/*!40000 ALTER TABLE `innodb_table_stats` DISABLE KEYS */;
+/*!40000 ALTER TABLE `innodb_table_stats` ENABLE KEYS */;
+UNLOCK TABLES;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `general_log` (
+ `event_time` timestamp(6) NOT NULL DEFAULT current_timestamp(6) ON UPDATE current_timestamp(6),
+ `user_host` mediumtext NOT NULL,
+ `thread_id` bigint(21) unsigned NOT NULL,
+ `server_id` int(10) unsigned NOT NULL,
+ `command_type` varchar(64) NOT NULL,
+ `argument` mediumtext NOT NULL
+) ENGINE=CSV DEFAULT CHARSET=utf8 COMMENT='General log';
+/*!40101 SET character_set_client = @saved_cs_client */;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `slow_log` (
+ `start_time` timestamp(6) NOT NULL DEFAULT current_timestamp(6) ON UPDATE current_timestamp(6),
+ `user_host` mediumtext NOT NULL,
+ `query_time` time(6) NOT NULL,
+ `lock_time` time(6) NOT NULL,
+ `rows_sent` int(11) NOT NULL,
+ `rows_examined` int(11) NOT NULL,
+ `db` varchar(512) NOT NULL,
+ `last_insert_id` int(11) NOT NULL,
+ `insert_id` int(11) NOT NULL,
+ `server_id` int(10) unsigned NOT NULL,
+ `sql_text` mediumtext NOT NULL,
+ `thread_id` bigint(21) unsigned NOT NULL,
+ `rows_affected` int(11) NOT NULL
+) ENGINE=CSV DEFAULT CHARSET=utf8 COMMENT='Slow log';
+/*!40101 SET character_set_client = @saved_cs_client */;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `transaction_registry` (
+ `transaction_id` bigint(20) unsigned NOT NULL,
+ `commit_id` bigint(20) unsigned NOT NULL,
+ `begin_timestamp` timestamp(6) NOT NULL DEFAULT '0000-00-00 00:00:00.000000',
+ `commit_timestamp` timestamp(6) NOT NULL DEFAULT '0000-00-00 00:00:00.000000',
+ `isolation_level` enum('READ-UNCOMMITTED','READ-COMMITTED','REPEATABLE-READ','SERIALIZABLE') COLLATE utf8_bin NOT NULL,
+ PRIMARY KEY (`transaction_id`),
+ UNIQUE KEY `commit_id` (`commit_id`),
+ KEY `begin_timestamp` (`begin_timestamp`),
+ KEY `commit_timestamp` (`commit_timestamp`,`transaction_id`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin STATS_PERSISTENT=0;
+/*!40101 SET character_set_client = @saved_cs_client */;
+/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
+/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
+/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
+#
+# With --insert-ignore
+#
+
+/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
+/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
+/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
+/*!40101 SET NAMES utf8mb4 */;
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+DROP TABLE IF EXISTS `innodb_index_stats`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `innodb_index_stats` (
+ `database_name` varchar(64) COLLATE utf8_bin NOT NULL,
+ `table_name` varchar(199) COLLATE utf8_bin NOT NULL,
+ `index_name` varchar(64) COLLATE utf8_bin NOT NULL,
+ `last_update` timestamp NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(),
+ `stat_name` varchar(64) COLLATE utf8_bin NOT NULL,
+ `stat_value` bigint(20) unsigned NOT NULL,
+ `sample_size` bigint(20) unsigned DEFAULT NULL,
+ `stat_description` varchar(1024) COLLATE utf8_bin NOT NULL,
+ PRIMARY KEY (`database_name`,`table_name`,`index_name`,`stat_name`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin STATS_PERSISTENT=0;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+LOCK TABLES `innodb_index_stats` WRITE;
+/*!40000 ALTER TABLE `innodb_index_stats` DISABLE KEYS */;
+/*!40000 ALTER TABLE `innodb_index_stats` ENABLE KEYS */;
+UNLOCK TABLES;
+DROP TABLE IF EXISTS `innodb_table_stats`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `innodb_table_stats` (
+ `database_name` varchar(64) COLLATE utf8_bin NOT NULL,
+ `table_name` varchar(199) COLLATE utf8_bin NOT NULL,
+ `last_update` timestamp NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(),
+ `n_rows` bigint(20) unsigned NOT NULL,
+ `clustered_index_size` bigint(20) unsigned NOT NULL,
+ `sum_of_other_index_sizes` bigint(20) unsigned NOT NULL,
+ PRIMARY KEY (`database_name`,`table_name`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin STATS_PERSISTENT=0;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+LOCK TABLES `innodb_table_stats` WRITE;
+/*!40000 ALTER TABLE `innodb_table_stats` DISABLE KEYS */;
+/*!40000 ALTER TABLE `innodb_table_stats` ENABLE KEYS */;
+UNLOCK TABLES;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `general_log` (
+ `event_time` timestamp(6) NOT NULL DEFAULT current_timestamp(6) ON UPDATE current_timestamp(6),
+ `user_host` mediumtext NOT NULL,
+ `thread_id` bigint(21) unsigned NOT NULL,
+ `server_id` int(10) unsigned NOT NULL,
+ `command_type` varchar(64) NOT NULL,
+ `argument` mediumtext NOT NULL
+) ENGINE=CSV DEFAULT CHARSET=utf8 COMMENT='General log';
+/*!40101 SET character_set_client = @saved_cs_client */;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `slow_log` (
+ `start_time` timestamp(6) NOT NULL DEFAULT current_timestamp(6) ON UPDATE current_timestamp(6),
+ `user_host` mediumtext NOT NULL,
+ `query_time` time(6) NOT NULL,
+ `lock_time` time(6) NOT NULL,
+ `rows_sent` int(11) NOT NULL,
+ `rows_examined` int(11) NOT NULL,
+ `db` varchar(512) NOT NULL,
+ `last_insert_id` int(11) NOT NULL,
+ `insert_id` int(11) NOT NULL,
+ `server_id` int(10) unsigned NOT NULL,
+ `sql_text` mediumtext NOT NULL,
+ `thread_id` bigint(21) unsigned NOT NULL,
+ `rows_affected` int(11) NOT NULL
+) ENGINE=CSV DEFAULT CHARSET=utf8 COMMENT='Slow log';
+/*!40101 SET character_set_client = @saved_cs_client */;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `transaction_registry` (
+ `transaction_id` bigint(20) unsigned NOT NULL,
+ `commit_id` bigint(20) unsigned NOT NULL,
+ `begin_timestamp` timestamp(6) NOT NULL DEFAULT '0000-00-00 00:00:00.000000',
+ `commit_timestamp` timestamp(6) NOT NULL DEFAULT '0000-00-00 00:00:00.000000',
+ `isolation_level` enum('READ-UNCOMMITTED','READ-COMMITTED','REPEATABLE-READ','SERIALIZABLE') COLLATE utf8_bin NOT NULL,
+ PRIMARY KEY (`transaction_id`),
+ UNIQUE KEY `commit_id` (`commit_id`),
+ KEY `begin_timestamp` (`begin_timestamp`),
+ KEY `commit_timestamp` (`commit_timestamp`,`transaction_id`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin STATS_PERSISTENT=0;
+/*!40101 SET character_set_client = @saved_cs_client */;
+/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
+/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
+/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
# End of 10.2 tests
#
# Test for Invisible columns
diff --git a/mysql-test/main/mysqldump.test b/mysql-test/main/mysqldump.test
index f328bf32361..799f24665ee 100644
--- a/mysql-test/main/mysqldump.test
+++ b/mysql-test/main/mysqldump.test
@@ -19,12 +19,7 @@ let collation=utf8_unicode_ci;
# There are tables in 'mysql' database of type innodb
--source include/have_innodb.inc
-# This test is slow on buildbot.
---source include/big_test.inc
-
-disable_query_log;
call mtr.add_suppression("Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT");
-enable_query_log;
--echo # Bug#37938 Test "mysqldump" lacks various insert statements
--echo # Turn off concurrent inserts to avoid random errors
@@ -32,15 +27,6 @@ enable_query_log;
SET @OLD_CONCURRENT_INSERT = @@GLOBAL.CONCURRENT_INSERT;
SET @@GLOBAL.CONCURRENT_INSERT = 0;
-
---disable_warnings
-DROP TABLE IF EXISTS t1, `"t"1`, t1aa, t2, t2aa, t3;
-drop database if exists mysqldump_test_db;
-drop database if exists db1;
-drop database if exists db2;
-drop view if exists v1, v2, v3;
---enable_warnings
-
# XML output
CREATE TABLE t1(a INT, KEY (a)) KEY_BLOCK_SIZE=1024;
@@ -2747,6 +2733,29 @@ INSERT INTO t1 (a) VALUES (1),(2),(3);
--exec $MYSQL_DUMP --default-character-set=utf8mb4 --triggers --no-data --no-create-info --add-drop-trigger --skip-comments --databases test
DROP TABLE t1;
+--echo #
+--echo # MDEV-20939: Race condition between mysqldump import and InnoDB
+--echo # persistent statistics calculation
+--echo #
+
+--let $ignore= --ignore-table=mysql.proxies_priv --ignore-table=mysql.user --ignore-table=mysql.global_priv --ignore-table=mysql.column_stats --ignore-table=mysql.columns_priv --ignore-table=mysql.db --ignore-table=mysql.event --ignore-table=mysql.func --ignore-table=mysql.gtid_slave_pos --ignore-table=mysql.help_category --ignore-table=mysql.help_keyword --ignore-table=mysql.help_relation --ignore-table=mysql.help_topic --ignore-table=mysql.host --ignore-table=mysql.index_stats --ignore-table=mysql.plugin --ignore-table=mysql.proc --ignore-table=mysql.procs_priv --ignore-table=mysql.roles_mapping --ignore-table=mysql.servers --ignore-table=mysql.table_stats --ignore-table=mysql.tables_priv --ignore-table=mysql.time_zone --ignore-table=mysql.time_zone_leap_second --ignore-table=mysql.time_zone_name --ignore-table=mysql.time_zone_transition --ignore-table=mysql.time_zone_transition_type --ignore-table=mysql.general_log --ignore-table=mysql.slow_log
+--let $skip_opts= --skip-dump-date --skip-comments
+
+--echo #
+--echo # Without --replace and --insert-ignore
+--echo #
+--exec $MYSQL_DUMP $ignore $skip_opts mysql
+
+--echo #
+--echo # With --replace
+--echo #
+--exec $MYSQL_DUMP $ignore $skip_opts --replace mysql
+
+--echo #
+--echo # With --insert-ignore
+--echo #
+--exec $MYSQL_DUMP $ignore $skip_opts --insert-ignore mysql
+
--echo # End of 10.2 tests
--echo #
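The three dumps above (plain, --replace, --insert-ignore) exist because of MDEV-20939: while a dump of the mysql schema is being imported, InnoDB may concurrently write rows into innodb_table_stats and innodb_index_stats, and a plain INSERT then aborts the import on a duplicate key. A minimal sketch of the three behaviours, using a hypothetical table stats_demo:

CREATE TABLE stats_demo (id INT PRIMARY KEY, val INT);
INSERT INTO stats_demo VALUES (1, 10);
INSERT INTO stats_demo VALUES (1, 20);          -- ER_DUP_ENTRY, a plain dump import stops here
REPLACE INTO stats_demo VALUES (1, 20);         -- --replace form: the existing row is overwritten
INSERT IGNORE INTO stats_demo VALUES (1, 30);   -- --insert-ignore form: warning only, row (1, 20) is kept
DROP TABLE stats_demo;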
diff --git a/mysql-test/main/opt_trace.result b/mysql-test/main/opt_trace.result
index ee1273decf6..0ced5f19e14 100644
--- a/mysql-test/main/opt_trace.result
+++ b/mysql-test/main/opt_trace.result
@@ -8591,4 +8591,13 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.range_scan_alternatives'))
]
DROP TABLE t1;
set optimizer_trace='enabled=off';
+#
+# MDEV-24975 Server consumes extra 4G memory upon querying INFORMATION_SCHEMA.OPTIMIZER_TRACE
+#
+set max_session_mem_used=1024*1024*1024;
+select count(*) from information_schema.optimizer_trace;
+select * from information_schema.optimizer_trace;
+set max_session_mem_used=default;
+#
# End of 10.4 tests
+#
diff --git a/mysql-test/main/opt_trace.test b/mysql-test/main/opt_trace.test
index 9040b5a54d0..3fae7f34750 100644
--- a/mysql-test/main/opt_trace.test
+++ b/mysql-test/main/opt_trace.test
@@ -623,4 +623,17 @@ SELECT JSON_DETAILED(JSON_EXTRACT(trace, '$**.range_scan_alternatives')) from IN
DROP TABLE t1;
set optimizer_trace='enabled=off';
+
+--echo #
+--echo # MDEV-24975 Server consumes extra 4G memory upon querying INFORMATION_SCHEMA.OPTIMIZER_TRACE
+--echo #
+set max_session_mem_used=1024*1024*1024;
+--disable_result_log
+select count(*) from information_schema.optimizer_trace;
+select * from information_schema.optimizer_trace;
+--enable_result_log
+set max_session_mem_used=default;
+
+--echo #
--echo # End of 10.4 tests
+--echo #
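The MDEV-24975 addition above bounds the fix with max_session_mem_used: reading INFORMATION_SCHEMA.OPTIMIZER_TRACE must fit into an ordinary per-session memory budget instead of allocating gigabytes. A minimal sketch of the same check, assuming a session where tracing has been switched on:

SET optimizer_trace = 'enabled=on';
SELECT 1;                                                  -- any statement, so a trace exists
SET max_session_mem_used = 1024*1024*1024;                 -- cap this session at 1 GiB
SELECT COUNT(*) FROM information_schema.optimizer_trace;   -- must succeed within the cap
SET max_session_mem_used = DEFAULT;
SET optimizer_trace = 'enabled=off';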
diff --git a/mysql-test/main/order_by.result b/mysql-test/main/order_by.result
index 22dfcbaa23e..826daf0542f 100644
--- a/mysql-test/main/order_by.result
+++ b/mysql-test/main/order_by.result
@@ -845,18 +845,20 @@ col2 col col
2 2 2
1 3 3
drop table t1, t2;
-create table t1 (a char(25));
+create table t1 (a char(70));
insert into t1 set a = repeat('x', 20);
-insert into t1 set a = concat(repeat('x', 19), 'z');
-insert into t1 set a = concat(repeat('x', 19), 'ab');
-insert into t1 set a = concat(repeat('x', 19), 'aa');
+insert into t1 set a = concat(repeat('x', 63), 'z');
+insert into t1 set a = concat(repeat('x', 63), 'ab');
+insert into t1 set a = concat(repeat('x', 63), 'aa');
set max_sort_length=20;
+Warnings:
+Warning 1292 Truncated incorrect max_sort_length value: '20'
select a from t1 order by a;
a
-xxxxxxxxxxxxxxxxxxxab
-xxxxxxxxxxxxxxxxxxxaa
xxxxxxxxxxxxxxxxxxxx
-xxxxxxxxxxxxxxxxxxxz
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxab
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxaa
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxz
drop table t1;
create table t1 (
`sid` decimal(8,0) default null,
@@ -3317,6 +3319,8 @@ drop table t1;
SET @save_sort_buffer_size= @@sort_buffer_size;
SET @save_max_sort_length= @@max_sort_length;
SET max_sort_length=8;
+Warnings:
+Warning 1292 Truncated incorrect max_sort_length value: '8'
SET sort_buffer_size=1024;
CREATE TABLE t1(a INT, b DECIMAL(65), c BLOB);
INSERT INTO t1 SELECT seq, seq, seq from seq_1_to_25;
@@ -3384,6 +3388,8 @@ SET @save_max_sort_length= @@max_sort_length;
SET @save_sort_buffer_size= @@sort_buffer_size;
SET @save_max_length_for_sort_data= @@max_length_for_sort_data;
SET max_sort_length=8;
+Warnings:
+Warning 1292 Truncated incorrect max_sort_length value: '8'
SET sort_buffer_size=1024;
SET max_length_for_sort_data=7000;
CREATE TABLE t1(a VARCHAR(64), b VARCHAR(2048))DEFAULT CHARSET=utf8;
@@ -3519,6 +3525,8 @@ INSERT INTO t1 VALUES (1,1), (2,2), (3,3), (4,4);
SET max_length_for_sort_data= 30;
SET sql_select_limit = 3;
SET max_sort_length=8;
+Warnings:
+Warning 1292 Truncated incorrect max_sort_length value: '8'
SELECT * FROM t1 ORDER BY a+1;
a b
1 1
diff --git a/mysql-test/main/order_by.test b/mysql-test/main/order_by.test
index 1269800e79e..1bf353fd69d 100644
--- a/mysql-test/main/order_by.test
+++ b/mysql-test/main/order_by.test
@@ -532,11 +532,11 @@ drop table t1, t2;
# Bug #5428: a problem with small max_sort_length value
#
-create table t1 (a char(25));
+create table t1 (a char(70));
insert into t1 set a = repeat('x', 20);
-insert into t1 set a = concat(repeat('x', 19), 'z');
-insert into t1 set a = concat(repeat('x', 19), 'ab');
-insert into t1 set a = concat(repeat('x', 19), 'aa');
+insert into t1 set a = concat(repeat('x', 63), 'z');
+insert into t1 set a = concat(repeat('x', 63), 'ab');
+insert into t1 set a = concat(repeat('x', 63), 'aa');
set max_sort_length=20;
select a from t1 order by a;
drop table t1;
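The reworked order_by cases reflect a raised lower bound for max_sort_length: setting 20 (or 8) now draws warning 1292 and the value is clamped, apparently to 64 given the 20-to-64 change in query_cache.test later in this patch, so the distinguishing suffixes had to move beyond that many leading bytes. A short sketch of the clamping:

SET max_sort_length = 20;     -- Warning 1292: Truncated incorrect max_sort_length value: '20'
SELECT @@max_sort_length;     -- reports the clamped minimum, not 20
SET max_sort_length = DEFAULT;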
diff --git a/mysql-test/main/parser.result b/mysql-test/main/parser.result
index a32f160853b..5ca0e099026 100644
--- a/mysql-test/main/parser.result
+++ b/mysql-test/main/parser.result
@@ -1788,6 +1788,15 @@ EXECUTE IMMEDIATE 'if(`systeminfo /FO LIST';
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '`systeminfo /FO LIST' at line 1
EXECUTE IMMEDIATE 'if(`systeminfo';
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '`systeminfo' at line 1
+#
+# MDEV-23666 Assertion failed in Lex_input_stream::body_utf8_append
+#
+SET @@sql_mode='ANSI_QUOTES';
+EXECUTE IMMEDIATE 'CREATE PROCEDURE p() UPDATE t SET c=\'\'"';
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '"' at line 1
+EXECUTE IMMEDIATE 'CREATE PROCEDURE p() UPDATE t SET c=\'\'"abc';
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '"abc' at line 1
+SET @@sql_mode=@save_sql_mode;
# End of 10.3 tests
#
# MDEV-19540: 10.4 allow lock options with SELECT in brackets
diff --git a/mysql-test/main/parser.test b/mysql-test/main/parser.test
index 0bc470ceb9e..d9a6c2df191 100644
--- a/mysql-test/main/parser.test
+++ b/mysql-test/main/parser.test
@@ -1564,6 +1564,21 @@ EXECUTE IMMEDIATE 'if(`systeminfo /FO LIST';
--error ER_PARSE_ERROR
EXECUTE IMMEDIATE 'if(`systeminfo';
+--echo #
+--echo # MDEV-23666 Assertion failed in Lex_input_stream::body_utf8_append
+--echo #
+SET @@sql_mode='ANSI_QUOTES';
+
+# Without the fix, executing the following statements results in an assertion
+# failure in Lex_input_stream::body_utf8_append while parsing the statement
+--error ER_PARSE_ERROR
+EXECUTE IMMEDIATE 'CREATE PROCEDURE p() UPDATE t SET c=\'\'"';
+
+--error ER_PARSE_ERROR
+EXECUTE IMMEDIATE 'CREATE PROCEDURE p() UPDATE t SET c=\'\'"abc';
+
+SET @@sql_mode=@save_sql_mode;
+
--echo # End of 10.3 tests
--echo #
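The MDEV-23666 cases only make sense under ANSI_QUOTES: in that mode a double quote opens an identifier rather than a string, so the trailing double quote in the prepared text leaves an unterminated identifier and must end in a plain parse error instead of the assertion. A small sketch of what the mode changes, with a hypothetical column name:

SET @save_sql_mode = @@sql_mode;
SET sql_mode = 'ANSI_QUOTES';
SELECT "no_such_column";            -- unknown column: "..." is now an identifier, not a string
SELECT 'still a string literal';    -- single quotes keep their usual meaning
SET sql_mode = @save_sql_mode;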
diff --git a/mysql-test/main/parser_not_embedded.test b/mysql-test/main/parser_not_embedded.test
index 3ebd23e888e..3af1260f4ad 100644
--- a/mysql-test/main/parser_not_embedded.test
+++ b/mysql-test/main/parser_not_embedded.test
@@ -21,7 +21,7 @@ select 7 as expected, /*!01000 1 + /*!01000 8 + /*!01000 error */ 16 + */ 2 + */
select 4 as expected, /* 1 + /*!01000 8 + */ 2 + */ 4;
EOF
---exec $MYSQL --comment --force --table test <$MYSQLTEST_VARDIR/tmp/bug39559.sql
+--exec $MYSQL --comments --force --table test <$MYSQLTEST_VARDIR/tmp/bug39559.sql
--remove_file $MYSQLTEST_VARDIR/tmp/bug39559.sql
--echo # Bug#46527 "COMMIT AND CHAIN RELEASE does not make sense"
diff --git a/mysql-test/main/password_expiration.result b/mysql-test/main/password_expiration.result
index d05f6b3b5d0..897811bb4ad 100644
--- a/mysql-test/main/password_expiration.result
+++ b/mysql-test/main/password_expiration.result
@@ -125,6 +125,7 @@ alter user user1@localhost password expire;
show create user user1@localhost;
CREATE USER for user1@localhost
CREATE USER `user1`@`localhost` PASSWORD EXPIRE
+ALTER USER `user1`@`localhost` PASSWORD EXPIRE INTERVAL 123 DAY
set password for user1@localhost= password('');
show create user user1@localhost;
CREATE USER for user1@localhost
@@ -151,10 +152,12 @@ alter user user1@localhost password expire;
show create user user1@localhost;
CREATE USER for user1@localhost
CREATE USER `user1`@`localhost` PASSWORD EXPIRE
+ALTER USER `user1`@`localhost` PASSWORD EXPIRE NEVER
flush privileges;
show create user user1@localhost;
CREATE USER for user1@localhost
CREATE USER `user1`@`localhost` PASSWORD EXPIRE
+ALTER USER `user1`@`localhost` PASSWORD EXPIRE NEVER
set password for user1@localhost= password('');
alter user user1@localhost password expire default;
show create user user1@localhost;
@@ -184,10 +187,12 @@ alter user user1@localhost password expire;
show create user user1@localhost;
CREATE USER for user1@localhost
CREATE USER `user1`@`localhost` PASSWORD EXPIRE
+ALTER USER `user1`@`localhost` PASSWORD EXPIRE NEVER
flush privileges;
show create user user1@localhost;
CREATE USER for user1@localhost
CREATE USER `user1`@`localhost` PASSWORD EXPIRE
+ALTER USER `user1`@`localhost` PASSWORD EXPIRE NEVER
set global disconnect_on_expired_password=ON;
connect(localhost,user1,,test,MYSQL_PORT,MYSQL_SOCK);
connect con1,localhost,user1;
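The extra ALTER USER rows expected above come from SHOW CREATE USER now reporting a non-default password-expiration policy separately from the CREATE USER statement. A minimal sketch, using a hypothetical account demo@localhost:

CREATE USER demo@localhost;
ALTER USER demo@localhost PASSWORD EXPIRE INTERVAL 123 DAY;   -- per-account policy
ALTER USER demo@localhost PASSWORD EXPIRE;                    -- force immediate expiration
SHOW CREATE USER demo@localhost;
-- expected, per the results above: a CREATE USER ... PASSWORD EXPIRE row followed by
-- ALTER USER `demo`@`localhost` PASSWORD EXPIRE INTERVAL 123 DAY
DROP USER demo@localhost;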
diff --git a/mysql-test/main/precedence.result b/mysql-test/main/precedence.result
index d5d4e662a8e..fc6579651b4 100644
--- a/mysql-test/main/precedence.result
+++ b/mysql-test/main/precedence.result
@@ -8016,4 +8016,8 @@ create or replace view v1 as select 1 IS TRUE IS FALSE, 2 IS FALSE IS UNKNOWN, 3
Select view_definition from information_schema.views where table_schema='test' and table_name='v1';
view_definition
select 1 is true is false AS `1 IS TRUE IS FALSE`,/*always not null*/ 1 is null AS `2 IS FALSE IS UNKNOWN`,/*always not null*/ 1 is null AS `3 IS UNKNOWN IS NULL`,/*always not null*/ 1 is null is true AS `4 IS NULL IS TRUE`
+create or replace view v1 as select 2 IS TRUE = 3, 2 IS FALSE = 3, 2 IS UNKNOWN = 3, 2 IS NULL = 3, ISNULL(2) = 1;
+Select view_definition from information_schema.views where table_schema='test' and table_name='v1';
+view_definition
+select 2 is true = 3 AS `2 IS TRUE = 3`,2 is false = 3 AS `2 IS FALSE = 3`,/*always not null*/ 1 is null = 3 AS `2 IS UNKNOWN = 3`,/*always not null*/ 1 is null = 3 AS `2 IS NULL = 3`,/*always not null*/ 1 is null = 1 AS `ISNULL(2) = 1`
drop view v1;
diff --git a/mysql-test/main/precedence.test b/mysql-test/main/precedence.test
index ad367c23603..cd7cee4f911 100644
--- a/mysql-test/main/precedence.test
+++ b/mysql-test/main/precedence.test
@@ -4785,4 +4785,7 @@ Select view_definition from information_schema.views where table_schema='test' a
create or replace view v1 as select 1 IS TRUE IS FALSE, 2 IS FALSE IS UNKNOWN, 3 IS UNKNOWN IS NULL, 4 IS NULL IS TRUE;
Select view_definition from information_schema.views where table_schema='test' and table_name='v1';
+create or replace view v1 as select 2 IS TRUE = 3, 2 IS FALSE = 3, 2 IS UNKNOWN = 3, 2 IS NULL = 3, ISNULL(2) = 1;
+Select view_definition from information_schema.views where table_schema='test' and table_name='v1';
+
drop view v1;
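The new view pins down how IS TRUE and friends combine with a following comparison: IS and = share a precedence level and group left to right, so 2 IS TRUE = 3 means (2 IS TRUE) = 3 and the stored definition needs no added parentheses. A small sketch of the evaluation:

SELECT 2 IS TRUE = 3;     -- (2 IS TRUE) = 3, i.e. 1 = 3, returns 0
SELECT (2 IS TRUE) = 3;   -- same parse, same result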
diff --git a/mysql-test/main/processlist_notembedded.result b/mysql-test/main/processlist_notembedded.result
index 3c2671ff5e3..d5c25c0a1d9 100644
--- a/mysql-test/main/processlist_notembedded.result
+++ b/mysql-test/main/processlist_notembedded.result
@@ -1,7 +1,7 @@
#
# MDEV-20466: SHOW PROCESSLIST truncates query text on \0 bytes
#
-connect con1,localhost,root,,;
+connect con1,localhost,root;
connection con1;
SET DEBUG_SYNC= 'before_join_optimize SIGNAL in_sync WAIT_FOR go';
connection default;
@@ -13,17 +13,21 @@ User
disconnect con1;
connection default;
SET DEBUG_SYNC = 'RESET';
-End of 5.5 tests
+#
+# End of 5.5 tests
+#
#
# MDEV-23752: SHOW EXPLAIN FOR thd waits for sleep
#
-connect con1,localhost,root,,;
-select sleep(100000);;
+connect con1,localhost,root;
+select sleep(100000);
connection default;
-SHOW EXPLAIN FOR con_id;
+SHOW EXPLAIN FOR $con_id;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
Note 1003 select sleep(100000)
-KILL QUERY con_id;
+KILL QUERY $con_id;
+#
# End of 10.2 tests
+#
diff --git a/mysql-test/main/processlist_notembedded.test b/mysql-test/main/processlist_notembedded.test
index 26021040c39..cc577200368 100644
--- a/mysql-test/main/processlist_notembedded.test
+++ b/mysql-test/main/processlist_notembedded.test
@@ -7,7 +7,7 @@ source include/count_sessions.inc;
--echo # MDEV-20466: SHOW PROCESSLIST truncates query text on \0 bytes
--echo #
-connect (con1,localhost,root,,);
+connect con1,localhost,root;
connection con1;
@@ -39,22 +39,22 @@ SET DEBUG_SYNC = 'RESET';
source include/wait_until_count_sessions.inc;
---echo End of 5.5 tests
+--echo #
+--echo # End of 5.5 tests
+--echo #
--echo #
--echo # MDEV-23752: SHOW EXPLAIN FOR thd waits for sleep
--echo #
---connect (con1,localhost,root,,)
+--connect con1,localhost,root
--let $con_id = `SELECT CONNECTION_ID()`
---send select sleep(100000);
+--send select sleep(100000)
--connection default
+evalp SHOW EXPLAIN FOR $con_id;
+evalp KILL QUERY $con_id;
---replace_result $con_id con_id
-eval SHOW EXPLAIN FOR $con_id;
-
---replace_result $con_id con_id
-eval KILL QUERY $con_id;
-
+--echo #
--echo # End of 10.2 tests
+--echo #
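The processlist changes switch the test to evalp, which (as the updated result shows) logs the statement with the $con_id placeholder while executing the expanded form, so the replace_result workaround is no longer needed. The feature being exercised is SHOW EXPLAIN FOR, which reads the plan of a statement already running in another session. A minimal two-session sketch, with 1234 standing in for the other session's CONNECTION_ID():

-- session A: start a long-running statement
SELECT SLEEP(100000);
-- session B: inspect its plan, then terminate only that query
SHOW EXPLAIN FOR 1234;
KILL QUERY 1234;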
diff --git a/mysql-test/main/ps.result b/mysql-test/main/ps.result
index f1972b6b40e..5a436ec5196 100644
--- a/mysql-test/main/ps.result
+++ b/mysql-test/main/ps.result
@@ -5489,6 +5489,72 @@ ERROR HY000: Default/ignore value is not supported for such parameter usage
EXECUTE IMMEDIATE 'SHOW DATABASES WHERE ?' USING 0;
Database
#
+# MDEV-24779: main.subselect fails in buildbot with --ps-protocol
+#
+CREATE TABLE t1(a INT);
+PREPARE stmt FROM "SELECT EXISTS(SELECT 1 FROM t1 GROUP BY a IN (select a from t1))";
+EXECUTE stmt;
+EXISTS(SELECT 1 FROM t1 GROUP BY a IN (select a from t1))
+0
+EXECUTE stmt;
+EXISTS(SELECT 1 FROM t1 GROUP BY a IN (select a from t1))
+0
+DROP TABLE t1;
+#
+# MDEV-25006: Failed assertion on executing EXPLAIN DELETE statement as a prepared statement
+#
+CREATE TABLE t1(c1 CHAR(255) PRIMARY KEY);
+PREPARE stmt FROM 'EXPLAIN DELETE b FROM t1 AS a JOIN t1 AS b';
+EXECUTE stmt;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE a system NULL NULL NULL NULL 0 Const row not found
+1 SIMPLE b system NULL NULL NULL NULL 0 Const row not found
+DROP TABLE t1;
+CREATE TABLE t1(a INT);
+PREPARE stmt FROM 'EXPLAIN DELETE FROM t1.* USING t1';
+EXECUTE stmt;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 system NULL NULL NULL NULL 0 Const row not found
+DEALLOCATE PREPARE stmt;
+DROP TABLE t1;
+#
+# MDEV-25108: Running of the EXPLAIN EXTENDED statement produces extra warning
+# in case it is executed in PS (prepared statement) mode
+#
+CREATE TABLE t1 (c int);
+CREATE TABLE t2 (d int);
+# EXPLAIN EXTENDED in the regular way (not PS mode)
+EXPLAIN EXTENDED SELECT (SELECT 1 FROM t2 WHERE d = c) FROM t1;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 PRIMARY t1 system NULL NULL NULL NULL 0 0.00 Const row not found
+2 SUBQUERY NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
+Warnings:
+Note 1276 Field or reference 'test.t1.c' of SELECT #2 was resolved in SELECT #1
+Note 1003 /* select#1 */ select (/* select#2 */ select 1 from `test`.`t2` where 0) AS `(SELECT 1 FROM t2 WHERE d = c)` from `test`.`t1`
+SHOW WARNINGS;
+Level Code Message
+Note 1276 Field or reference 'test.t1.c' of SELECT #2 was resolved in SELECT #1
+Note 1003 /* select#1 */ select (/* select#2 */ select 1 from `test`.`t2` where 0) AS `(SELECT 1 FROM t2 WHERE d = c)` from `test`.`t1`
+# Now run the same EXPLAIN EXTENDED in PS mode. The number of warnings
+# and their content must be the same as when the statement is run
+# in the regular way
+PREPARE stmt FROM "EXPLAIN EXTENDED SELECT (SELECT 1 FROM t2 WHERE d = c) FROM t1";
+Warnings:
+Note 1276 Field or reference 'test.t1.c' of SELECT #2 was resolved in SELECT #1
+EXECUTE stmt;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 PRIMARY t1 system NULL NULL NULL NULL 0 0.00 Const row not found
+2 SUBQUERY NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
+Warnings:
+Note 1276 Field or reference 'test.t1.c' of SELECT #2 was resolved in SELECT #1
+Note 1003 /* select#1 */ select (/* select#2 */ select 1 from `test`.`t2` where 0) AS `(SELECT 1 FROM t2 WHERE d = c)` from `test`.`t1`
+SHOW WARNINGS;
+Level Code Message
+Note 1276 Field or reference 'test.t1.c' of SELECT #2 was resolved in SELECT #1
+Note 1003 /* select#1 */ select (/* select#2 */ select 1 from `test`.`t2` where 0) AS `(SELECT 1 FROM t2 WHERE d = c)` from `test`.`t1`
+DEALLOCATE PREPARE stmt;
+DROP TABLE t1, t2;
+#
# End of 10.2 tests
#
#
@@ -5507,5 +5573,37 @@ DEALLOCATE PREPARE stmt;
DROP VIEW v1;
DROP TABLE t1;
#
+# MDEV-25197: The statement set password=password('') executed in PS mode
+# fails in case it is run by a user with expired password
+#
+CREATE USER user1@localhost PASSWORD EXPIRE;
+SET @disconnect_on_expired_password_save=@@global.disconnect_on_expired_password;
+SET GLOBAL disconnect_on_expired_password=OFF;
+connect con1,localhost,user1;
+connection con1;
+# Check that no regular statement like SELECT can be prepared
+# by a user with an expired password
+PREPARE stmt FROM "SELECT 1";
+ERROR HY000: You must SET PASSWORD before executing this statement
+# Check that the DEALLOCATE PREPARE statement can be run by a user
+# with an expired password
+PREPARE stmt FROM "SET password=password('')";
+DEALLOCATE PREPARE stmt;
+# Check that the SET PASSWORD statement can be executed in PS mode by
+# a user with an expired password
+PREPARE stmt FROM "SET password=password('')";
+EXECUTE stmt;
+PREPARE stmt FROM "SELECT 1";
+# Check that user's password is not expired anymore
+EXECUTE stmt;
+1
+1
+DEALLOCATE PREPARE stmt;
+# Clean up
+disconnect con1;
+connection default;
+SET GLOBAL disconnect_on_expired_password=@disconnect_on_expired_password_save;
+DROP USER user1@localhost;
+#
# End of 10.4 tests
#
diff --git a/mysql-test/main/ps.test b/mysql-test/main/ps.test
index e702cb76bbb..2a468d33ace 100644
--- a/mysql-test/main/ps.test
+++ b/mysql-test/main/ps.test
@@ -4932,6 +4932,50 @@ EXECUTE IMMEDIATE 'SHOW DATABASES WHERE ?' USING DEFAULT;
EXECUTE IMMEDIATE 'SHOW DATABASES WHERE ?' USING 0;
--echo #
+--echo # MDEV-24779: main.subselect fails in buildbot with --ps-protocol
+--echo #
+
+CREATE TABLE t1(a INT);
+PREPARE stmt FROM "SELECT EXISTS(SELECT 1 FROM t1 GROUP BY a IN (select a from t1))";
+EXECUTE stmt;
+EXECUTE stmt;
+DROP TABLE t1;
+
+--echo #
+--echo # MDEV-25006: Failed assertion on executing EXPLAIN DELETE statement as a prepared statement
+--echo #
+
+CREATE TABLE t1(c1 CHAR(255) PRIMARY KEY);
+PREPARE stmt FROM 'EXPLAIN DELETE b FROM t1 AS a JOIN t1 AS b';
+EXECUTE stmt;
+DROP TABLE t1;
+CREATE TABLE t1(a INT);
+PREPARE stmt FROM 'EXPLAIN DELETE FROM t1.* USING t1';
+EXECUTE stmt;
+DEALLOCATE PREPARE stmt;
+DROP TABLE t1;
+
+--echo #
+--echo # MDEV-25108: Running of the EXPLAIN EXTENDED statement produces extra warning
+--echo # in case it is executed in PS (prepared statement) mode
+--echo #
+CREATE TABLE t1 (c int);
+CREATE TABLE t2 (d int);
+
+--echo # EXPLAIN EXTENDED in the regular way (not PS mode)
+EXPLAIN EXTENDED SELECT (SELECT 1 FROM t2 WHERE d = c) FROM t1;
+SHOW WARNINGS;
+
+--echo # Now run the same EXPLAIN EXTENDED in PS mode. The number of warnings
+--echo # and their content must be the same as when the statement is run
+--echo # in the regular way
+PREPARE stmt FROM "EXPLAIN EXTENDED SELECT (SELECT 1 FROM t2 WHERE d = c) FROM t1";
+EXECUTE stmt;
+SHOW WARNINGS;
+
+DEALLOCATE PREPARE stmt;
+DROP TABLE t1, t2;
+--echo #
--echo # End of 10.2 tests
--echo #
@@ -4956,5 +5000,42 @@ DROP VIEW v1;
DROP TABLE t1;
--echo #
+--echo # MDEV-25197: The statement set password=password('') executed in PS mode
+--echo # fails in case it is run by a user with expired password
+--echo #
+CREATE USER user1@localhost PASSWORD EXPIRE;
+
+SET @disconnect_on_expired_password_save=@@global.disconnect_on_expired_password;
+SET GLOBAL disconnect_on_expired_password=OFF;
+
+connect(con1,localhost,user1);
+connection con1;
+--echo # Check that no regular statement like SELECT can be prepared
+--echo # by a user with an expired password
+--error ER_MUST_CHANGE_PASSWORD
+PREPARE stmt FROM "SELECT 1";
+
+--echo # Check that the DEALLOCATE PREPARE statement can be run by a user
+--echo # with an expired password
+PREPARE stmt FROM "SET password=password('')";
+DEALLOCATE PREPARE stmt;
+
+--echo # Check that the SET PASSWORD statement can be executed in PS mode by
+--echo # a user with an expired password
+PREPARE stmt FROM "SET password=password('')";
+EXECUTE stmt;
+PREPARE stmt FROM "SELECT 1";
+--echo # Check that user's password is not expired anymore
+EXECUTE stmt;
+DEALLOCATE PREPARE stmt;
+
+--echo # Clean up
+disconnect con1;
+connection default;
+
+SET GLOBAL disconnect_on_expired_password=@disconnect_on_expired_password_save;
+DROP USER user1@localhost;
+
+--echo #
--echo # End of 10.4 tests
--echo #
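Among the prepared-statement fixes, MDEV-25197 is the behavioural one: with an expired password and disconnect_on_expired_password=OFF, SET PASSWORD is the only statement the session may run, and that must also hold when it is prepared first. A condensed sketch of the expected session, assuming the connection is made as the expired account:

PREPARE s FROM 'SELECT 1';                       -- rejected with ER_MUST_CHANGE_PASSWORD
PREPARE s FROM "SET password = password('')";    -- allowed even before the password is changed
EXECUTE s;                                       -- clears the expired state
PREPARE s FROM 'SELECT 1';                       -- now accepted
EXECUTE s;
DEALLOCATE PREPARE s;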
diff --git a/mysql-test/main/ps_show_log.result b/mysql-test/main/ps_show_log.result
new file mode 100644
index 00000000000..54eabaeded6
--- /dev/null
+++ b/mysql-test/main/ps_show_log.result
@@ -0,0 +1,65 @@
+#
+# MDEV-24208 SHOW RELAYLOG EVENTS command is not supported in the prepared
+# statement protocol yet
+#
+CREATE USER u1;
+include/master-slave.inc
+[connection master]
+connection master;
+CREATE TABLE t1(n INT);
+DROP TABLE t1;
+connection slave;
+PREPARE stmt_1 FROM 'SHOW BINLOG EVENTS';
+EXECUTE stmt_1;
+Log_name Pos Event_type Server_id End_log_pos Info
+# # Format_desc # # #
+# # Gtid_list # # []
+# # Binlog_checkpoint # # #
+# # Gtid # # GTID 0-1-1
+# # Query # # use `test`; CREATE TABLE t1(n INT)
+# # Gtid # # GTID 0-1-2
+# # Query # # use `test`; DROP TABLE IF EXISTS `t1` /* generated by server */
+# Execute the same prepared statement the second time to check that
+# no internal structures used for handling the statement
+# 'SHOW BINLOG EVENTS' were damaged.
+EXECUTE stmt_1;
+Log_name Pos Event_type Server_id End_log_pos Info
+# # Format_desc # # #
+# # Gtid_list # # []
+# # Binlog_checkpoint # # #
+# # Gtid # # GTID 0-1-1
+# # Query # # use `test`; CREATE TABLE t1(n INT)
+# # Gtid # # GTID 0-1-2
+# # Query # # use `test`; DROP TABLE IF EXISTS `t1` /* generated by server */
+DEALLOCATE PREPARE stmt_1;
+connection slave;
+PREPARE stmt_1 FROM 'SHOW RELAYLOG EVENTS';
+EXECUTE stmt_1;
+Log_name Pos Event_type Server_id End_log_pos Info
+slave-relay-bin.000001 # Format_desc # # #
+slave-relay-bin.000001 # Rotate # # #
+# Execute the same prepared statement the second time to check that
+# no internal structures used for handling the statement
+# 'SHOW RELAYLOG EVENTS' were damaged.
+EXECUTE stmt_1;
+Log_name Pos Event_type Server_id End_log_pos Info
+slave-relay-bin.000001 # Format_desc # # #
+slave-relay-bin.000001 # Rotate # # #
+DEALLOCATE PREPARE stmt_1;
+# Create the user u1 without the REPLICATION SLAVE privilege required
+# for running the statements SHOW BINLOG EVENTS/SHOW RELAYLOG EVENTS
+# and check that an attempt to execute the statements SHOW BINLOG EVENTS/
+# SHOW RELAYLOG EVENTS as prepared statements by a user without the
+# required privileges results in an error.
+connect con2,localhost,u1,,test;
+PREPARE stmt_1 FROM 'SHOW BINLOG EVENTS';
+EXECUTE stmt_1;
+ERROR 42000: Access denied; you need (at least one of) the REPLICATION SLAVE privilege(s) for this operation
+PREPARE stmt_1 FROM 'SHOW RELAYLOG EVENTS';
+EXECUTE stmt_1;
+ERROR 42000: Access denied; you need (at least one of) the REPLICATION SLAVE privilege(s) for this operation
+DEALLOCATE PREPARE stmt_1;
+include/rpl_end.inc
+connection default;
+DROP USER u1;
+# End of 10.2 tests
diff --git a/mysql-test/main/ps_show_log.test b/mysql-test/main/ps_show_log.test
new file mode 100644
index 00000000000..95000d2d7e0
--- /dev/null
+++ b/mysql-test/main/ps_show_log.test
@@ -0,0 +1,73 @@
+--echo #
+--echo # MDEV-24208 SHOW RELAYLOG EVENTS command is not supported in the prepared
+--echo # statement protocol yet
+--echo #
+
+CREATE USER u1;
+
+--source include/have_binlog_format_statement.inc
+--source include/master-slave.inc
+--connection master
+CREATE TABLE t1(n INT);
+
+DROP TABLE t1;
+
+--sync_slave_with_master
+PREPARE stmt_1 FROM 'SHOW BINLOG EVENTS';
+
+--replace_column 2 # 4 # 5 #
+--replace_regex /Server ver:.*Binlog ver: .*/#/ /slave-bin.*/#/
+EXECUTE stmt_1;
+
+--echo # Execute the same prepared statement the second time to check that
+--echo # no internal structures used for handling the statement
+--echo # 'SHOW BINLOG EVENTS' were damaged.
+
+--replace_column 2 # 4 # 5 #
+--replace_regex /Server ver:.*Binlog ver: .*/#/ /slave-bin.*/#/
+EXECUTE stmt_1;
+
+DEALLOCATE PREPARE stmt_1;
+
+--connection slave
+PREPARE stmt_1 FROM 'SHOW RELAYLOG EVENTS';
+--replace_column 2 # 4 # 5 #
+--replace_regex /Server ver:.*Binlog ver: .*/#/ /slave-relay-bin.*;pos=.*/#/
+EXECUTE stmt_1;
+
+--echo # Execute the same prepared statement the second time to check that
+--echo # no internal structures used for handling the statement
+--echo # 'SHOW RELAYLOG EVENTS' were damaged.
+
+--replace_column 2 # 4 # 5 #
+--replace_regex /Server ver:.*Binlog ver: .*/#/ /slave-relay-bin.*;pos=.*/#/
+EXECUTE stmt_1;
+
+DEALLOCATE PREPARE stmt_1;
+
+--echo # Create the user u1 without the REPLICATION SLAVE privilege required
+--echo # for running the statements SHOW BINLOG EVENTS/SHOW RELAYLOG EVENTS
+--echo # and check that an attempt to execute the statements SHOW BINLOG EVENTS/
+--echo # SHOW RELAYLOG EVENTS as prepared statements by a user without the
+--echo # required privileges results in an error.
+
+--connect (con2,localhost,u1,,test)
+PREPARE stmt_1 FROM 'SHOW BINLOG EVENTS';
+
+--error ER_SPECIFIC_ACCESS_DENIED_ERROR
+EXECUTE stmt_1;
+
+PREPARE stmt_1 FROM 'SHOW RELAYLOG EVENTS';
+
+--error ER_SPECIFIC_ACCESS_DENIED_ERROR
+EXECUTE stmt_1;
+
+DEALLOCATE PREPARE stmt_1;
+
+--source include/rpl_end.inc
+
+--connection default
+# Clean up
+DROP USER u1;
+
+--echo # End of 10.2 tests
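The new ps_show_log suite covers MDEV-24208: SHOW BINLOG EVENTS and SHOW RELAYLOG EVENTS now work through the prepared-statement protocol, re-execution must not corrupt internal state, and the REPLICATION SLAVE privilege is still enforced at execute time. A minimal sketch on a replica that has both logs:

PREPARE s FROM 'SHOW RELAYLOG EVENTS';
EXECUTE s;
EXECUTE s;                  -- second run must return the same events
DEALLOCATE PREPARE s;
-- for an account without REPLICATION SLAVE, EXECUTE (not PREPARE) fails with
-- ER_SPECIFIC_ACCESS_DENIED_ERROR for both SHOW BINLOG and SHOW RELAYLOG EVENTS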
diff --git a/mysql-test/main/query_cache.result b/mysql-test/main/query_cache.result
index 4615c63feb2..fc7ca726c48 100644
--- a/mysql-test/main/query_cache.result
+++ b/mysql-test/main/query_cache.result
@@ -818,33 +818,33 @@ select @@character_set_results;
NULL
set character_set_results=default;
set GLOBAL query_cache_size=1355776;
-create table t1 (id int auto_increment primary key, c char(25));
+create table t1 (id int auto_increment primary key, c char(65));
insert into t1 set c = repeat('x',24);
-insert into t1 set c = concat(repeat('x',24),'x');
-insert into t1 set c = concat(repeat('x',24),'w');
-insert into t1 set c = concat(repeat('x',24),'y');
+insert into t1 set c = concat(repeat('x',64),'x');
+insert into t1 set c = concat(repeat('x',64),'w');
+insert into t1 set c = concat(repeat('x',64),'y');
set max_sort_length=200;
select c from t1 order by c, id;
c
xxxxxxxxxxxxxxxxxxxxxxxx
-xxxxxxxxxxxxxxxxxxxxxxxxw
-xxxxxxxxxxxxxxxxxxxxxxxxx
-xxxxxxxxxxxxxxxxxxxxxxxxy
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxw
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxy
reset query cache;
-set max_sort_length=20;
+set max_sort_length=64;
select c from t1 order by c, id;
c
xxxxxxxxxxxxxxxxxxxxxxxx
-xxxxxxxxxxxxxxxxxxxxxxxxx
-xxxxxxxxxxxxxxxxxxxxxxxxw
-xxxxxxxxxxxxxxxxxxxxxxxxy
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxw
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxy
set max_sort_length=200;
select c from t1 order by c, id;
c
xxxxxxxxxxxxxxxxxxxxxxxx
-xxxxxxxxxxxxxxxxxxxxxxxxw
-xxxxxxxxxxxxxxxxxxxxxxxxx
-xxxxxxxxxxxxxxxxxxxxxxxxy
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxw
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxy
set max_sort_length=default;
select '1' || '3' from t1;
'1' || '3'
diff --git a/mysql-test/main/query_cache.test b/mysql-test/main/query_cache.test
index 118ebbf1703..6e113f0cdb7 100644
--- a/mysql-test/main/query_cache.test
+++ b/mysql-test/main/query_cache.test
@@ -606,15 +606,15 @@ set character_set_results=default;
#
# max_sort_length
set GLOBAL query_cache_size=1355776;
-create table t1 (id int auto_increment primary key, c char(25));
+create table t1 (id int auto_increment primary key, c char(65));
insert into t1 set c = repeat('x',24);
-insert into t1 set c = concat(repeat('x',24),'x');
-insert into t1 set c = concat(repeat('x',24),'w');
-insert into t1 set c = concat(repeat('x',24),'y');
+insert into t1 set c = concat(repeat('x',64),'x');
+insert into t1 set c = concat(repeat('x',64),'w');
+insert into t1 set c = concat(repeat('x',64),'y');
set max_sort_length=200;
select c from t1 order by c, id;
reset query cache;
-set max_sort_length=20;
+set max_sort_length=64;
select c from t1 order by c, id;
set max_sort_length=200;
select c from t1 order by c, id;
diff --git a/mysql-test/main/range.result b/mysql-test/main/range.result
index c10ddf9d9fd..afd1571283f 100644
--- a/mysql-test/main/range.result
+++ b/mysql-test/main/range.result
@@ -3218,6 +3218,59 @@ pk a b
1 5 50
65 5 50
drop table t1;
+create table t1(a int);
+insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+create table t2 (
+pk int primary key,
+key1 int,
+col1 varchar(255),
+key (key1, pk)
+);
+insert into t2 (pk, key1)
+select A.a+10 *B.a + 100*C.a, A.a+10 *B.a +100*C.a from t1 A, t1 B, t1 C;
+# This must use ALL, not range:
+explain select * from t2 force index (primary) where pk not in (1,2,3);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 ALL PRIMARY NULL NULL NULL 1000 Using where
+drop table t1,t2;
+#
+# MDEV-24444: ASAN use-after-poison in Item_func_in::get_func_mm_tree with NOT IN
+#
+CREATE TABLE t1 (id INT, a CHAR(3), b INT, PRIMARY KEY(id), KEY(b), KEY(a));
+INSERT INTO t1 VALUES (1,'foo',10),(2,'bar',20);
+CREATE TABLE t2 (code CHAR(8), num INT, PRIMARY KEY (code));
+INSERT INTO t2 VALUES ('100',1),('111',2);
+SELECT * FROM t1 JOIN t2 ON (t2.code = t1.b) WHERE t1.a NOT IN ('baz', 'qux') OR t2.num != 3;
+id a b code num
+DROP TABLE t1, t2;
+#
+# MDEV-23634: Select query hanged the server and leads to OOM ...
+# (The fix is to add the same handling for "col!=const" as MDEV-21958 did for NOT IN)
+#
+create table t1 (pk int primary key, a int);
+insert into t1 (pk) values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+# must not use range:
+explain select * from t1 force index (primary) where pk != 1 and pk!=2 ;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL PRIMARY NULL NULL NULL 10 Using where
+drop table t1;
+#
+# MDEV-22251: get_key_scans_params: Conditional jump or move depends on uninitialised value
+#
+create table t1 (pk int, i int, v int, primary key (pk), key(v));
+insert into t1 (pk,i,v) values (1,1,2),(2,2,4),(3,3,6),(4,4,8),(5,5,10),(6,6,12),(7,7,14),(8,8,16);
+create table t2 (a int, b int);
+insert into t2 values (1,2),(2,4);
+EXPLAIN
+select * from t1 inner join t2 on ( t2.b = t1.v or t2.a = t1.pk);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 ALL NULL NULL NULL NULL 2
+1 SIMPLE t1 ALL PRIMARY,v NULL NULL NULL 8 Range checked for each record (index map: 0x3)
+select * from t1 inner join t2 on ( t2.b = t1.v or t2.a = t1.pk);
+pk i v a b
+1 1 2 1 2
+2 2 4 2 4
+drop table t1, t2;
#
# End of 10.2 tests
#
diff --git a/mysql-test/main/range.test b/mysql-test/main/range.test
index 65f580698c5..de2a428c49a 100644
--- a/mysql-test/main/range.test
+++ b/mysql-test/main/range.test
@@ -2180,6 +2180,65 @@ eval $q4;
drop table t1;
+#
+# MDEV-21958: Query having many NOT-IN clauses running forever (testcase 2)
+#
+create table t1(a int);
+insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+
+create table t2 (
+ pk int primary key,
+ key1 int,
+ col1 varchar(255),
+ key (key1, pk)
+);
+
+insert into t2 (pk, key1)
+select A.a+10 *B.a + 100*C.a, A.a+10 *B.a +100*C.a from t1 A, t1 B, t1 C;
+
+--echo # This must use ALL, not range:
+explain select * from t2 force index (primary) where pk not in (1,2,3);
+
+drop table t1,t2;
+
+--echo #
+--echo # MDEV-24444: ASAN use-after-poison in Item_func_in::get_func_mm_tree with NOT IN
+--echo #
+CREATE TABLE t1 (id INT, a CHAR(3), b INT, PRIMARY KEY(id), KEY(b), KEY(a));
+INSERT INTO t1 VALUES (1,'foo',10),(2,'bar',20);
+
+CREATE TABLE t2 (code CHAR(8), num INT, PRIMARY KEY (code));
+INSERT INTO t2 VALUES ('100',1),('111',2);
+
+SELECT * FROM t1 JOIN t2 ON (t2.code = t1.b) WHERE t1.a NOT IN ('baz', 'qux') OR t2.num != 3;
+
+DROP TABLE t1, t2;
+
+
+--echo #
+--echo # MDEV-23634: Select query hanged the server and leads to OOM ...
+--echo # (The fix is to add the same handling for "col!=const" as MDEV-21958 did for NOT IN)
+--echo #
+create table t1 (pk int primary key, a int);
+insert into t1 (pk) values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+
+--echo # must not use range:
+explain select * from t1 force index (primary) where pk != 1 and pk!=2 ;
+drop table t1;
+
+--echo #
+--echo # MDEV-22251: get_key_scans_params: Conditional jump or move depends on uninitialised value
+--echo #
+
+create table t1 (pk int, i int, v int, primary key (pk), key(v));
+insert into t1 (pk,i,v) values (1,1,2),(2,2,4),(3,3,6),(4,4,8),(5,5,10),(6,6,12),(7,7,14),(8,8,16);
+create table t2 (a int, b int);
+insert into t2 values (1,2),(2,4);
+EXPLAIN
+select * from t1 inner join t2 on ( t2.b = t1.v or t2.a = t1.pk);
+select * from t1 inner join t2 on ( t2.b = t1.v or t2.a = t1.pk);
+drop table t1, t2;
+
--echo #
--echo # End of 10.2 tests
--echo #
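Note on the "must not use range" expectations above: a predicate such as pk != 1 (and likewise pk NOT IN (1,2,3)) expands to a union of open intervals that covers almost the whole key, so a full scan (type ALL) is the intended plan, and building one interval per excluded value is evidently what made the MDEV-21958/MDEV-23634 queries run forever. A minimal sketch of the equivalent interval form, assuming the MDEV-23634 table t1 (pk INT PRIMARY KEY, a INT), shown for illustration only:

select * from t1 where pk < 1 or (pk > 1 and pk < 2) or pk > 2;
-- selects the same rows as pk != 1 and pk != 2 for non-NULL pk; with thousands of
-- excluded values this per-value interval list is what became prohibitively expensive to build
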
diff --git a/mysql-test/main/range_mrr_icp.result b/mysql-test/main/range_mrr_icp.result
index 826ac621064..3fce8b0fc23 100644
--- a/mysql-test/main/range_mrr_icp.result
+++ b/mysql-test/main/range_mrr_icp.result
@@ -3215,6 +3215,59 @@ pk a b
70 4 40
71 2 20
drop table t1;
+create table t1(a int);
+insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+create table t2 (
+pk int primary key,
+key1 int,
+col1 varchar(255),
+key (key1, pk)
+);
+insert into t2 (pk, key1)
+select A.a+10 *B.a + 100*C.a, A.a+10 *B.a +100*C.a from t1 A, t1 B, t1 C;
+# This must use ALL, not range:
+explain select * from t2 force index (primary) where pk not in (1,2,3);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 ALL PRIMARY NULL NULL NULL 1000 Using where
+drop table t1,t2;
+#
+# MDEV-24444: ASAN use-after-poison in Item_func_in::get_func_mm_tree with NOT IN
+#
+CREATE TABLE t1 (id INT, a CHAR(3), b INT, PRIMARY KEY(id), KEY(b), KEY(a));
+INSERT INTO t1 VALUES (1,'foo',10),(2,'bar',20);
+CREATE TABLE t2 (code CHAR(8), num INT, PRIMARY KEY (code));
+INSERT INTO t2 VALUES ('100',1),('111',2);
+SELECT * FROM t1 JOIN t2 ON (t2.code = t1.b) WHERE t1.a NOT IN ('baz', 'qux') OR t2.num != 3;
+id a b code num
+DROP TABLE t1, t2;
+#
+# MDEV-23634: Select query hanged the server and leads to OOM ...
+# (The fix is to add the same handling for "col!=const" as MDEV-21958 did for NOT IN)
+#
+create table t1 (pk int primary key, a int);
+insert into t1 (pk) values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+# must not use range:
+explain select * from t1 force index (primary) where pk != 1 and pk!=2 ;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL PRIMARY NULL NULL NULL 10 Using where
+drop table t1;
+#
+# MDEV-22251: get_key_scans_params: Conditional jump or move depends on uninitialised value
+#
+create table t1 (pk int, i int, v int, primary key (pk), key(v));
+insert into t1 (pk,i,v) values (1,1,2),(2,2,4),(3,3,6),(4,4,8),(5,5,10),(6,6,12),(7,7,14),(8,8,16);
+create table t2 (a int, b int);
+insert into t2 values (1,2),(2,4);
+EXPLAIN
+select * from t1 inner join t2 on ( t2.b = t1.v or t2.a = t1.pk);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 ALL NULL NULL NULL NULL 2
+1 SIMPLE t1 ALL PRIMARY,v NULL NULL NULL 8 Range checked for each record (index map: 0x3)
+select * from t1 inner join t2 on ( t2.b = t1.v or t2.a = t1.pk);
+pk i v a b
+1 1 2 1 2
+2 2 4 2 4
+drop table t1, t2;
#
# End of 10.2 tests
#
diff --git a/mysql-test/main/range_vs_index_merge.result b/mysql-test/main/range_vs_index_merge.result
index f0bf2224c92..286338d0433 100644
--- a/mysql-test/main/range_vs_index_merge.result
+++ b/mysql-test/main/range_vs_index_merge.result
@@ -1810,11 +1810,11 @@ CREATE TABLE t1 (a int PRIMARY KEY, b int, INDEX idx(b));
INSERT INTO t1 VALUES (167,9999), (168,10000);
EXPLAIN
SELECT * FROM t1
-WHERE a BETWEEN 4 AND 5 AND b IN (255,4) OR a IN (2,14,25) OR a!=2;
+WHERE a BETWEEN 4 AND 5 AND b IN (255,4) OR a IN (2,14,25) OR (a<2 or a>2);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range PRIMARY,idx PRIMARY 0 NULL 2 Using index condition; Using where
SELECT * FROM t1
-WHERE a BETWEEN 4 AND 5 AND b IN (255,4) OR a IN (2,14,25) OR a!=2;
+WHERE a BETWEEN 4 AND 5 AND b IN (255,4) OR a IN (2,14,25) OR (a<2 or a>2);
a b
167 9999
168 10000
@@ -1846,12 +1846,12 @@ test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
EXPLAIN
SELECT * FROM t1 FORCE KEY (state,capital)
-WHERE ( state = 'Alabama' OR state >= 'Colorado' ) AND id != 9
+WHERE ( state = 'Alabama' OR state >= 'Colorado' ) AND (id<9 or id>9)
OR ( capital >= 'Topeka' OR state = 'Kansas' ) AND state != 'Texas';
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range state,capital state 71 NULL 8 Using index condition; Using where
SELECT * FROM t1 FORCE KEY (state,capital)
-WHERE ( state = 'Alabama' OR state >= 'Colorado' ) AND id != 9
+WHERE ( state = 'Alabama' OR state >= 'Colorado' ) AND (id<9 or id>9)
OR ( capital >= 'Topeka' OR state = 'Kansas' ) AND state != 'Texas';
id state capital
4 Florida Tallahassee
diff --git a/mysql-test/main/range_vs_index_merge.test b/mysql-test/main/range_vs_index_merge.test
index 5ed5f621ab6..94210ce5dd3 100644
--- a/mysql-test/main/range_vs_index_merge.test
+++ b/mysql-test/main/range_vs_index_merge.test
@@ -1231,9 +1231,9 @@ INSERT INTO t1 VALUES (167,9999), (168,10000);
EXPLAIN
SELECT * FROM t1
- WHERE a BETWEEN 4 AND 5 AND b IN (255,4) OR a IN (2,14,25) OR a!=2;
+ WHERE a BETWEEN 4 AND 5 AND b IN (255,4) OR a IN (2,14,25) OR (a<2 or a>2);
SELECT * FROM t1
- WHERE a BETWEEN 4 AND 5 AND b IN (255,4) OR a IN (2,14,25) OR a!=2;
+ WHERE a BETWEEN 4 AND 5 AND b IN (255,4) OR a IN (2,14,25) OR (a<2 or a>2);
DROP TABLE t1;
@@ -1266,10 +1266,10 @@ ANALYZE TABLE t1;
EXPLAIN
SELECT * FROM t1 FORCE KEY (state,capital)
-WHERE ( state = 'Alabama' OR state >= 'Colorado' ) AND id != 9
+WHERE ( state = 'Alabama' OR state >= 'Colorado' ) AND (id<9 or id>9)
OR ( capital >= 'Topeka' OR state = 'Kansas' ) AND state != 'Texas';
SELECT * FROM t1 FORCE KEY (state,capital)
-WHERE ( state = 'Alabama' OR state >= 'Colorado' ) AND id != 9
+WHERE ( state = 'Alabama' OR state >= 'Colorado' ) AND (id<9 or id>9)
OR ( capital >= 'Topeka' OR state = 'Kansas' ) AND state != 'Texas';
DROP TABLE t1;
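Note: the hunks above replace a != 2 with (a<2 or a>2) and id != 9 with (id<9 or id>9). For non-NULL values the two forms select the same rows, but the explicit interval form is still handled by the range optimizer, presumably so the range/index_merge plans these tests verify stay unchanged after the new handling of "col != const". A quick equivalence check, for illustration only:

select 2 != 2, (2 < 2 or 2 > 2), 3 != 2, (3 < 2 or 3 > 2);
-- returns 0 0 1 1: identical truth values for both forms
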
diff --git a/mysql-test/main/range_vs_index_merge_innodb.result b/mysql-test/main/range_vs_index_merge_innodb.result
index 4cb4745d63a..65800e7397b 100644
--- a/mysql-test/main/range_vs_index_merge_innodb.result
+++ b/mysql-test/main/range_vs_index_merge_innodb.result
@@ -1816,11 +1816,11 @@ CREATE TABLE t1 (a int PRIMARY KEY, b int, INDEX idx(b));
INSERT INTO t1 VALUES (167,9999), (168,10000);
EXPLAIN
SELECT * FROM t1
-WHERE a BETWEEN 4 AND 5 AND b IN (255,4) OR a IN (2,14,25) OR a!=2;
+WHERE a BETWEEN 4 AND 5 AND b IN (255,4) OR a IN (2,14,25) OR (a<2 or a>2);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index PRIMARY,idx idx 5 NULL 2 Using where; Using index
SELECT * FROM t1
-WHERE a BETWEEN 4 AND 5 AND b IN (255,4) OR a IN (2,14,25) OR a!=2;
+WHERE a BETWEEN 4 AND 5 AND b IN (255,4) OR a IN (2,14,25) OR (a<2 or a>2);
a b
167 9999
168 10000
@@ -1852,12 +1852,12 @@ test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
EXPLAIN
SELECT * FROM t1 FORCE KEY (state,capital)
-WHERE ( state = 'Alabama' OR state >= 'Colorado' ) AND id != 9
+WHERE ( state = 'Alabama' OR state >= 'Colorado' ) AND (id<9 or id>9)
OR ( capital >= 'Topeka' OR state = 'Kansas' ) AND state != 'Texas';
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range state,capital state 71 NULL 8 Using index condition; Using where
SELECT * FROM t1 FORCE KEY (state,capital)
-WHERE ( state = 'Alabama' OR state >= 'Colorado' ) AND id != 9
+WHERE ( state = 'Alabama' OR state >= 'Colorado' ) AND (id<9 or id>9)
OR ( capital >= 'Topeka' OR state = 'Kansas' ) AND state != 'Texas';
id state capital
4 Florida Tallahassee
diff --git a/mysql-test/main/selectivity.result b/mysql-test/main/selectivity.result
index ca6a7ef25c7..7e3202337ec 100644
--- a/mysql-test/main/selectivity.result
+++ b/mysql-test/main/selectivity.result
@@ -1882,4 +1882,62 @@ a b
set optimizer_use_condition_selectivity= @save_optimizer_use_condition_selectivity;
drop table t1;
# End of 10.1 tests
+#
+# MDEV-22583: Selectivity for BIT columns in filtered column for EXPLAIN is incorrect
+#
+SET optimizer_use_condition_selectivity=4;
+SET histogram_size=255;
+CREATE TABLE t1 (a BIT(32), b INT);
+INSERT INTO t1 VALUES (80, 80), (81, 81), (82, 82);
+ANALYZE TABLE t1 PERSISTENT FOR ALL;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+EXPLAIN EXTENDED SELECT * from t1 where t1.a >= 81;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 3 66.41 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where `test`.`t1`.`a` >= 81
+SELECT HEX(a), b from t1 where t1.a >= 81;
+HEX(a) b
+51 81
+52 82
+set optimizer_use_condition_selectivity= @save_optimizer_use_condition_selectivity;
+set histogram_size=@save_histogram_size;
+DROP TABLE t1;
+#
+# MDEV-19474: Histogram statistics are used even with optimizer_use_condition_selectivity=3
+#
+CREATE TABLE t1(a int);
+INSERT INTO t1 values (1),(2),(2),(3),(4);
+SET optimizer_use_condition_selectivity=4;
+SET histogram_size= 255;
+set use_stat_tables='preferably';
+ANALYZE TABLE t1 PERSISTENT FOR ALL;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE a=2;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 5 39.84 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` = 2
+SET optimizer_use_condition_selectivity=3;
+# filtered should show 25 %
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE a=2;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 5 25.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` = 2
+FLUSH TABLES;
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE a=2;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 5 25.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` = 2
+set optimizer_use_condition_selectivity= @save_optimizer_use_condition_selectivity;
+set histogram_size=@save_histogram_size;
+set use_stat_tables= @save_use_stat_tables;
+DROP TABLE t1;
+# End of 10.2 tests
set @@global.histogram_size=@save_histogram_size;
diff --git a/mysql-test/main/selectivity.test b/mysql-test/main/selectivity.test
index 2a0cc823eb4..1d96dc0bf80 100644
--- a/mysql-test/main/selectivity.test
+++ b/mysql-test/main/selectivity.test
@@ -1281,6 +1281,51 @@ drop table t1;
--echo # End of 10.1 tests
+--echo #
+--echo # MDEV-22583: Selectivity for BIT columns in filtered column for EXPLAIN is incorrect
+--echo #
+
+SET optimizer_use_condition_selectivity=4;
+SET histogram_size=255;
+CREATE TABLE t1 (a BIT(32), b INT);
+INSERT INTO t1 VALUES (80, 80), (81, 81), (82, 82);
+ANALYZE TABLE t1 PERSISTENT FOR ALL;
+EXPLAIN EXTENDED SELECT * from t1 where t1.a >= 81;
+SELECT HEX(a), b from t1 where t1.a >= 81;
+
+set optimizer_use_condition_selectivity= @save_optimizer_use_condition_selectivity;
+set histogram_size=@save_histogram_size;
+DROP TABLE t1;
+
+--echo #
+--echo # MDEV-19474: Histogram statistics are used even with optimizer_use_condition_selectivity=3
+--echo #
+
+CREATE TABLE t1(a int);
+INSERT INTO t1 values (1),(2),(2),(3),(4);
+SET optimizer_use_condition_selectivity=4;
+SET histogram_size= 255;
+
+set use_stat_tables='preferably';
+
+ANALYZE TABLE t1 PERSISTENT FOR ALL;
+
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE a=2;
+SET optimizer_use_condition_selectivity=3;
+
+--echo # filtered should show 25 %
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE a=2;
+FLUSH TABLES;
+
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE a=2;
+set optimizer_use_condition_selectivity= @save_optimizer_use_condition_selectivity;
+set histogram_size=@save_histogram_size;
+set use_stat_tables= @save_use_stat_tables;
+
+DROP TABLE t1;
+
+--echo # End of 10.2 tests
+
#
# Clean up
#
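Note on the expected 'filtered' figures above: t1 holds 5 rows with 4 distinct values of a, so with optimizer_use_condition_selectivity=3 (no histogram) the equality a=2 is estimated from avg_frequency as (5/4)/5 = 25.00 %, while =4 with histogram_size=255 refines this to 39.84 %, close to the true 2/5 = 40 %. (In the MDEV-22583 case, HEX(a) prints 51 and 52 simply because the BIT values 81 and 82 are 0x51 and 0x52 in hexadecimal.) A sketch of the arithmetic, for illustration only:

select (5/4)/5 * 100 as filtered_without_histogram, 2/5 * 100 as true_selectivity;
-- returns 25.0000... and 40.0000...; the former matches the two 25.00 EXPLAIN outputs above
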
diff --git a/mysql-test/main/selectivity_innodb.result b/mysql-test/main/selectivity_innodb.result
index 3bf313ae2b3..a87d0f53bfe 100644
--- a/mysql-test/main/selectivity_innodb.result
+++ b/mysql-test/main/selectivity_innodb.result
@@ -1443,7 +1443,7 @@ EXPLAIN EXTENDED
SELECT * FROM t1, t2
WHERE a <> 'USARussian' AND b IS NULL;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE t1 ref PRIMARY,b b 5 const 2 66.67 Using where; Using index
+1 SIMPLE t1 ref PRIMARY,b b 5 const 1 100.00 Using where; Using index
1 SIMPLE t2 ALL NULL NULL NULL NULL 14 100.00 Using join buffer (flat, BNL join)
Warnings:
Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b`,`test`.`t2`.`i` AS `i` from `test`.`t1` join `test`.`t2` where `test`.`t1`.`a` <> 'USARussian' and `test`.`t1`.`b` is null
@@ -1892,6 +1892,64 @@ a b
set optimizer_use_condition_selectivity= @save_optimizer_use_condition_selectivity;
drop table t1;
# End of 10.1 tests
+#
+# MDEV-22583: Selectivity for BIT columns in filtered column for EXPLAIN is incorrect
+#
+SET optimizer_use_condition_selectivity=4;
+SET histogram_size=255;
+CREATE TABLE t1 (a BIT(32), b INT);
+INSERT INTO t1 VALUES (80, 80), (81, 81), (82, 82);
+ANALYZE TABLE t1 PERSISTENT FOR ALL;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+EXPLAIN EXTENDED SELECT * from t1 where t1.a >= 81;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 3 66.41 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where `test`.`t1`.`a` >= 81
+SELECT HEX(a), b from t1 where t1.a >= 81;
+HEX(a) b
+51 81
+52 82
+set optimizer_use_condition_selectivity= @save_optimizer_use_condition_selectivity;
+set histogram_size=@save_histogram_size;
+DROP TABLE t1;
+#
+# MDEV-19474: Histogram statistics are used even with optimizer_use_condition_selectivity=3
+#
+CREATE TABLE t1(a int);
+INSERT INTO t1 values (1),(2),(2),(3),(4);
+SET optimizer_use_condition_selectivity=4;
+SET histogram_size= 255;
+set use_stat_tables='preferably';
+ANALYZE TABLE t1 PERSISTENT FOR ALL;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE a=2;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 5 39.84 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` = 2
+SET optimizer_use_condition_selectivity=3;
+# filtered should show 25 %
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE a=2;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 5 25.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` = 2
+FLUSH TABLES;
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE a=2;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 5 25.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` = 2
+set optimizer_use_condition_selectivity= @save_optimizer_use_condition_selectivity;
+set histogram_size=@save_histogram_size;
+set use_stat_tables= @save_use_stat_tables;
+DROP TABLE t1;
+# End of 10.2 tests
set @@global.histogram_size=@save_histogram_size;
set optimizer_switch=@save_optimizer_switch_for_selectivity_test;
set @tmp_ust= @@use_stat_tables;
diff --git a/mysql-test/main/set_statement.result b/mysql-test/main/set_statement.result
index 511ecf77357..53574fb4e4f 100644
--- a/mysql-test/main/set_statement.result
+++ b/mysql-test/main/set_statement.result
@@ -1217,6 +1217,31 @@ set @rnd=1;
select @rnd;
@rnd
0
+#
+# MDEV-24860: Incorrect behaviour of SET STATEMENT in case
+# it is executed as a prepared statement
+#
+PREPARE stmt FROM "SET STATEMENT sql_mode = 'NO_ENGINE_SUBSTITUTION' FOR CREATE TABLE t1 AS SELECT CONCAT('abc') AS c1";
+EXECUTE stmt;
+DEALLOCATE PREPARE stmt;
+# Show the definition of the table t1 created using a prepared statement
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c1` varchar(3) NOT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+DROP TABLE t1;
+# Create the table t1 with the same definition as before, this time
+# using regular statement execution mode.
+SET STATEMENT sql_mode = 'NO_ENGINE_SUBSTITUTION' FOR CREATE TABLE t1 AS SELECT CONCAT('abc') AS c1;
+# Show that the table has the same definition as when it was
+# created in prepared statement mode.
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c1` varchar(3) NOT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+DROP TABLE t1;
create table t (a int);
SET sql_mode=ORACLE;
SET STATEMENT myisam_sort_buffer_size=800000 FOR OPTIMIZE TABLE t;
@@ -1234,3 +1259,4 @@ SET sql_mode=ORACLE;
SET STATEMENT max_statement_time=30 FOR DELETE FROM mysql.user where user = 'unknown';
SET sql_mode=default;
SET STATEMENT max_statement_time=30 FOR DELETE FROM mysql.user where user = 'unknown';
+# End of 10.4 tests
diff --git a/mysql-test/main/set_statement.test b/mysql-test/main/set_statement.test
index 12a6ccad8f9..670e9862abc 100644
--- a/mysql-test/main/set_statement.test
+++ b/mysql-test/main/set_statement.test
@@ -1137,6 +1137,30 @@ while ($1)
--echo # @rnd should be 0
select @rnd;
+
+--echo #
+--echo # MDEV-24860: Incorrect behaviour of SET STATEMENT in case
+--echo # it is executed as a prepared statement
+--echo #
+PREPARE stmt FROM "SET STATEMENT sql_mode = 'NO_ENGINE_SUBSTITUTION' FOR CREATE TABLE t1 AS SELECT CONCAT('abc') AS c1";
+EXECUTE stmt;
+DEALLOCATE PREPARE stmt;
+
+--echo # Show the definition of the table t1 created using a prepared statement
+SHOW CREATE TABLE t1;
+
+DROP TABLE t1;
+
+--echo # Create the table t1 with the same definition as before, this time
+--echo # using regular statement execution mode.
+SET STATEMENT sql_mode = 'NO_ENGINE_SUBSTITUTION' FOR CREATE TABLE t1 AS SELECT CONCAT('abc') AS c1;
+
+--echo # Show that the table has the same definition as when it was
+--echo # created in prepared statement mode.
+SHOW CREATE TABLE t1;
+
+DROP TABLE t1;
+
create table t (a int);
SET sql_mode=ORACLE;
SET STATEMENT myisam_sort_buffer_size=800000 FOR OPTIMIZE TABLE t;
@@ -1152,3 +1176,5 @@ SET sql_mode=ORACLE;
SET STATEMENT max_statement_time=30 FOR DELETE FROM mysql.user where user = 'unknown';
SET sql_mode=default;
SET STATEMENT max_statement_time=30 FOR DELETE FROM mysql.user where user = 'unknown';
+
+--echo # End of 10.4 tests
diff --git a/mysql-test/main/skip_grants-master.opt b/mysql-test/main/skip_grants.opt
index 5699a3387b8..5699a3387b8 100644
--- a/mysql-test/main/skip_grants-master.opt
+++ b/mysql-test/main/skip_grants.opt
diff --git a/mysql-test/main/skip_grants.result b/mysql-test/main/skip_grants.result
index 154f77fff76..a8633807571 100644
--- a/mysql-test/main/skip_grants.result
+++ b/mysql-test/main/skip_grants.result
@@ -48,10 +48,16 @@ DROP PROCEDURE p3;
DROP FUNCTION f1;
DROP FUNCTION f2;
DROP FUNCTION f3;
+#
+# Bug #26807 "set global event_scheduler=1" and --skip-grant-tables crashes server
+#
set global event_scheduler=1;
Warnings:
Note 1408 Event Scheduler: Loaded 0 events
set global event_scheduler=0;
+#
+# Bug#26285 Selecting information_schema crashes server
+#
select count(*) from information_schema.COLUMN_PRIVILEGES;
count(*)
0
@@ -64,14 +70,21 @@ count(*)
select count(*) from information_schema.USER_PRIVILEGES;
count(*)
0
-End of 5.0 tests
+#
+# End of 5.0 tests
+#
#
# Bug#29817 Queries with UDF fail with non-descriptive error
# if mysql.proc is missing
#
select no_such_function(1);
ERROR 42000: FUNCTION test.no_such_function does not exist
-End of 5.1 tests
+#
+# End of 5.1 tests
+#
+#
+# MDEV-8280 crash in 'show global status' with --skip-grant-tables
+#
show global status like 'Acl%';
Variable_name Value
Acl_column_grants 0
@@ -85,6 +98,17 @@ Acl_role_grants 0
Acl_roles 0
Acl_table_grants 0
Acl_users 0
+#
+# End of 10.1 tests
+#
+#
+# MDEV-22966 Server crashes or hangs with SET ROLE when started with skip-grant-tables
+#
+set role x;
+ERROR HY000: The MariaDB server is running with the --skip-grant-tables option so it cannot execute this statement
+#
+# End of 10.2 tests
+#
show create user root@localhost;
ERROR HY000: The MariaDB server is running with the --skip-grant-tables option so it cannot execute this statement
insert mysql.global_priv values ('foo', 'bar', '{}');
@@ -112,3 +136,6 @@ CREATE USER `baz`@`baz` IDENTIFIED BY PASSWORD '*E52096EF8EB0240275A7FE9E069101C
drop user bar@foo;
drop user baz@baz;
# restart
+#
+# End of 10.3 tests
+#
diff --git a/mysql-test/main/skip_grants.test b/mysql-test/main/skip_grants.test
index ccad3c2d13f..eb8d3c3df88 100644
--- a/mysql-test/main/skip_grants.test
+++ b/mysql-test/main/skip_grants.test
@@ -89,20 +89,23 @@ DROP FUNCTION f1;
DROP FUNCTION f2;
DROP FUNCTION f3;
-#
-# Bug #26807 "set global event_scheduler=1" and --skip-grant-tables crashes server
-#
+--echo #
+--echo # Bug #26807 "set global event_scheduler=1" and --skip-grant-tables crashes server
+--echo #
set global event_scheduler=1;
set global event_scheduler=0;
-#
-# Bug#26285 Selecting information_schema crahes server
-#
+--echo #
+--echo # Bug#26285 Selecting information_schema crashes server
+--echo #
select count(*) from information_schema.COLUMN_PRIVILEGES;
select count(*) from information_schema.SCHEMA_PRIVILEGES;
select count(*) from information_schema.TABLE_PRIVILEGES;
select count(*) from information_schema.USER_PRIVILEGES;
---echo End of 5.0 tests
+
+--echo #
+--echo # End of 5.0 tests
+--echo #
--echo #
--echo # Bug#29817 Queries with UDF fail with non-descriptive error
@@ -111,13 +114,30 @@ select count(*) from information_schema.USER_PRIVILEGES;
--error ER_SP_DOES_NOT_EXIST
select no_such_function(1);
---echo End of 5.1 tests
+--echo #
+--echo # End of 5.1 tests
+--echo #
-#
-# MDEV-8280 crash in 'show global status' with --skip-grant-tables
-#
+--echo #
+--echo # MDEV-8280 crash in 'show global status' with --skip-grant-tables
+--echo #
show global status like 'Acl%';
+--echo #
+--echo # End of 10.1 tests
+--echo #
+
+--echo #
+--echo # MDEV-22966 Server crashes or hangs with SET ROLE when started with skip-grant-tables
+--echo #
+
+--error ER_OPTION_PREVENTS_STATEMENT
+set role x;
+
+--echo #
+--echo # End of 10.2 tests
+--echo #
+
#
# MDEV-18297
# How to reset a forgotten root password
@@ -140,3 +160,7 @@ drop user bar@foo;
drop user baz@baz;
# need to restart the server to restore the --skip-grant state
--source include/restart_mysqld.inc
+
+--echo #
+--echo # End of 10.3 tests
+--echo #
diff --git a/mysql-test/main/sp-ucs2.result b/mysql-test/main/sp-ucs2.result
index ca448efa535..047a64713af 100644
--- a/mysql-test/main/sp-ucs2.result
+++ b/mysql-test/main/sp-ucs2.result
@@ -100,22 +100,21 @@ RETURNS VARCHAR(64) CHARACTER SET ucs2
BEGIN
RETURN 'str';
END|
-ERROR 42000: COLLATION 'ucs2_unicode_ci' is not valid for CHARACTER SET 'latin1'
+DROP FUNCTION f|
CREATE FUNCTION f(f1 VARCHAR(64) CHARACTER SET ucs2)
RETURNS VARCHAR(64) COLLATE ucs2_unicode_ci
BEGIN
RETURN 'str';
END|
-ERROR 42000: COLLATION 'ucs2_unicode_ci' is not valid for CHARACTER SET 'latin1'
+DROP FUNCTION f|
CREATE FUNCTION f(f1 VARCHAR(64) CHARACTER SET ucs2)
RETURNS VARCHAR(64) CHARACTER SET ucs2
BEGIN
DECLARE f2 VARCHAR(64) COLLATE ucs2_unicode_ci;
RETURN 'str';
END|
-ERROR 42000: COLLATION 'ucs2_unicode_ci' is not valid for CHARACTER SET 'latin1'
+DROP FUNCTION f|
SET NAMES utf8;
-DROP FUNCTION IF EXISTS bug48766;
CREATE FUNCTION bug48766 ()
RETURNS ENUM( 'w' ) CHARACTER SET ucs2
RETURN 0;
@@ -140,3 +139,13 @@ WHERE ROUTINE_NAME='bug48766';
DTD_IDENTIFIER
enum('а','б','в','г')
DROP FUNCTION bug48766;
+call mtr.add_suppression('invalid value in column mysql.proc.');
+set collation_connection=ucs2_general_ci;
+insert into mysql.proc (db, name, type, specific_name, language, sql_data_access, is_deterministic, security_type, param_list, returns, body, definer, created, modified, sql_mode, comment, character_set_client, collation_connection, db_collation, body_utf8 ) values ( 'a', 'a', 'function', 'bug14233_1', 'sql', 'reads_sql_data', 'no', 'definer', '', 'int(10)', 'select * from mysql.user', 'root@localhost', now(), '0000-00-00 00:00:00', '', '', '', '', '', 'select * from mysql.user' );
+select routine_name from information_schema.routines where routine_name='a';
+routine_name
+a
+Warnings:
+Warning 1601 Creation context of stored routine `a`.`a` is invalid
+set collation_connection=default;
+delete from mysql.proc where name='a';
diff --git a/mysql-test/main/sp-ucs2.test b/mysql-test/main/sp-ucs2.test
index a1aec8071b4..c6dbdaacb5e 100644
--- a/mysql-test/main/sp-ucs2.test
+++ b/mysql-test/main/sp-ucs2.test
@@ -114,35 +114,35 @@ DROP FUNCTION f1|
#
# COLLATE with no CHARACTER SET in IN param
#
---error ER_COLLATION_CHARSET_MISMATCH
CREATE FUNCTION f(f1 VARCHAR(64) COLLATE ucs2_unicode_ci)
RETURNS VARCHAR(64) CHARACTER SET ucs2
BEGIN
RETURN 'str';
END|
+DROP FUNCTION f|
#
# COLLATE with no CHARACTER SET in RETURNS
#
---error ER_COLLATION_CHARSET_MISMATCH
CREATE FUNCTION f(f1 VARCHAR(64) CHARACTER SET ucs2)
RETURNS VARCHAR(64) COLLATE ucs2_unicode_ci
BEGIN
RETURN 'str';
END|
+DROP FUNCTION f|
#
# COLLATE with no CHARACTER SET in DECLARE
#
---error ER_COLLATION_CHARSET_MISMATCH
CREATE FUNCTION f(f1 VARCHAR(64) CHARACTER SET ucs2)
RETURNS VARCHAR(64) CHARACTER SET ucs2
BEGIN
DECLARE f2 VARCHAR(64) COLLATE ucs2_unicode_ci;
RETURN 'str';
END|
+DROP FUNCTION f|
delimiter ;|
@@ -151,9 +151,6 @@ delimiter ;|
# Bug#48766 SHOW CREATE FUNCTION returns extra data in return clause
#
SET NAMES utf8;
---disable_warnings
-DROP FUNCTION IF EXISTS bug48766;
---enable_warnings
#
# Test that Latin letters are not prepended with extra '\0'.
#
@@ -175,3 +172,13 @@ SELECT DTD_IDENTIFIER FROM INFORMATION_SCHEMA.ROUTINES
WHERE ROUTINE_NAME='bug48766';
DROP FUNCTION bug48766;
+
+#
+# mysql.proc entry with an invalid creation context (checked via INFORMATION_SCHEMA.ROUTINES)
+#
+call mtr.add_suppression('invalid value in column mysql.proc.');
+set collation_connection=ucs2_general_ci;
+insert into mysql.proc (db, name, type, specific_name, language, sql_data_access, is_deterministic, security_type, param_list, returns, body, definer, created, modified, sql_mode, comment, character_set_client, collation_connection, db_collation, body_utf8 ) values ( 'a', 'a', 'function', 'bug14233_1', 'sql', 'reads_sql_data', 'no', 'definer', '', 'int(10)', 'select * from mysql.user', 'root@localhost', now(), '0000-00-00 00:00:00', '', '', '', '', '', 'select * from mysql.user' );
+select routine_name from information_schema.routines where routine_name='a';
+set collation_connection=default;
+delete from mysql.proc where name='a';
diff --git a/mysql-test/main/stat_tables.result b/mysql-test/main/stat_tables.result
index 55d8bbf51c9..53557b39b05 100644
--- a/mysql-test/main/stat_tables.result
+++ b/mysql-test/main/stat_tables.result
@@ -829,6 +829,20 @@ length(a)
set names latin1;
set @@use_stat_tables=@save_use_stat_tables;
drop table t1;
+#
+# MDEV-23753: SIGSEGV in Column_stat::store_stat_fields
+#
+CREATE TABLE t1 (a INT, b INT) PARTITION BY HASH (b) PARTITIONS 2;
+LOCK TABLES t1 WRITE;
+ANALYZE TABLE t1 PERSISTENT FOR COLUMNS (a) INDEXES ();
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+ANALYZE TABLE t1 PERSISTENT FOR COLUMNS (nonexisting) INDEXES (nonexisting);
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze error Invalid argument
+DROP TABLE t1;
+# please keep this at the end
set @@global.histogram_size=@save_histogram_size;
# Start of 10.4 tests
diff --git a/mysql-test/main/stat_tables.test b/mysql-test/main/stat_tables.test
index ca341a93b81..9955908bd60 100644
--- a/mysql-test/main/stat_tables.test
+++ b/mysql-test/main/stat_tables.test
@@ -1,4 +1,5 @@
--source include/have_stat_tables.inc
+--source include/have_partition.inc
select @@global.use_stat_tables;
select @@session.use_stat_tables;
@@ -573,6 +574,17 @@ set names latin1;
set @@use_stat_tables=@save_use_stat_tables;
drop table t1;
+--echo #
+--echo # MDEV-23753: SIGSEGV in Column_stat::store_stat_fields
+--echo #
+
+CREATE TABLE t1 (a INT, b INT) PARTITION BY HASH (b) PARTITIONS 2;
+LOCK TABLES t1 WRITE;
+ANALYZE TABLE t1 PERSISTENT FOR COLUMNS (a) INDEXES ();
+ANALYZE TABLE t1 PERSISTENT FOR COLUMNS (nonexisting) INDEXES (nonexisting);
+DROP TABLE t1;
+
+
+--echo # please keep this at the end
set @@global.histogram_size=@save_histogram_size;
diff --git a/mysql-test/main/stat_tables_innodb.result b/mysql-test/main/stat_tables_innodb.result
index 26d4bff6be9..41fd303ed4c 100644
--- a/mysql-test/main/stat_tables_innodb.result
+++ b/mysql-test/main/stat_tables_innodb.result
@@ -861,6 +861,20 @@ length(a)
set names latin1;
set @@use_stat_tables=@save_use_stat_tables;
drop table t1;
+#
+# MDEV-23753: SIGSEGV in Column_stat::store_stat_fields
+#
+CREATE TABLE t1 (a INT, b INT) PARTITION BY HASH (b) PARTITIONS 2;
+LOCK TABLES t1 WRITE;
+ANALYZE TABLE t1 PERSISTENT FOR COLUMNS (a) INDEXES ();
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+ANALYZE TABLE t1 PERSISTENT FOR COLUMNS (nonexisting) INDEXES (nonexisting);
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze error Invalid argument
+DROP TABLE t1;
+# please keep this at the end
set @@global.histogram_size=@save_histogram_size;
# Start of 10.4 tests
diff --git a/mysql-test/main/status2.result b/mysql-test/main/status2.result
index fa0fc4e1061..60309e14fe3 100644
--- a/mysql-test/main/status2.result
+++ b/mysql-test/main/status2.result
@@ -74,4 +74,12 @@ DROP TRIGGER trigg1;
DROP FUNCTION testQuestion;
DROP EVENT ev1;
DROP TABLE t1,t2;
-End of 6.0 tests
+#
+# End of 5.5 tests
+#
+select variable_value < 1024*1024*1024 from information_schema.global_status where variable_name='memory_used';
+variable_value < 1024*1024*1024
+1
+#
+# End of 10.2 tests
+#
diff --git a/mysql-test/main/status2.test b/mysql-test/main/status2.test
index fa3b718efaa..ea674c2ed7c 100644
--- a/mysql-test/main/status2.test
+++ b/mysql-test/main/status2.test
@@ -64,5 +64,14 @@ DROP TRIGGER trigg1;
DROP FUNCTION testQuestion;
DROP EVENT ev1;
DROP TABLE t1,t2;
---echo End of 6.0 tests
+
+--echo #
+--echo # End of 5.5 tests
+--echo #
+
+select variable_value < 1024*1024*1024 from information_schema.global_status where variable_name='memory_used';
+
+--echo #
+--echo # End of 10.2 tests
+--echo #
diff --git a/mysql-test/main/subselect.result b/mysql-test/main/subselect.result
index 349e7dca129..15ebaa33474 100644
--- a/mysql-test/main/subselect.result
+++ b/mysql-test/main/subselect.result
@@ -7344,6 +7344,44 @@ WHERE (t2.i, t2.pk) NOT IN ( SELECT t3.i, t3.i FROM t t3, t t4 ) AND t1.c = 'foo
pk i c pk i c
1 10 foo 1 10 foo
DROP TABLE t;
+#
+# MDEV-25002: Outer reference in ON clause of subselect
+#
+create table t1 (
+pk int primary key,
+a int
+) engine=myisam;
+insert into t1 values (1,1), (2,2);
+create table t2 (
+pk int primary key,
+b int
+) engine=myisam;
+insert into t2 values (1,1), (2,3);
+create table t3 (a int);
+insert into t3 values (1),(2);
+select a,
+(select count(*) from t1, t2
+where t2.pk=t3.a and t1.pk=1) as sq
+from t3;
+a sq
+1 1
+2 1
+select a,
+(select count(*) from t1 join t2 on t2.pk=t3.a
+where t1.pk=1) as sq
+from t3;
+a sq
+1 1
+2 1
+select a from t3
+where a in (select t2.b from t1,t2 where t2.pk=t3.a and t1.pk=1);
+a
+1
+select a from t3
+where a in (select t2.b from t1 join t2 on t2.pk=t3.a where t1.pk=1);
+a
+1
+drop table t1,t2,t3;
# End of 10.2 tests
#
# Start of 10.4 tests
diff --git a/mysql-test/main/subselect.test b/mysql-test/main/subselect.test
index be17254202e..d87aba57567 100644
--- a/mysql-test/main/subselect.test
+++ b/mysql-test/main/subselect.test
@@ -6171,6 +6171,41 @@ SELECT * FROM t t1 RIGHT JOIN t t2 ON (t2.pk = t1.pk)
DROP TABLE t;
+--echo #
+--echo # MDEV-25002: Outer reference in ON clause of subselect
+--echo #
+
+create table t1 (
+ pk int primary key,
+ a int
+) engine=myisam;
+insert into t1 values (1,1), (2,2);
+
+create table t2 (
+ pk int primary key,
+ b int
+) engine=myisam;
+insert into t2 values (1,1), (2,3);
+
+create table t3 (a int);
+insert into t3 values (1),(2);
+
+select a,
+ (select count(*) from t1, t2
+ where t2.pk=t3.a and t1.pk=1) as sq
+from t3;
+select a,
+ (select count(*) from t1 join t2 on t2.pk=t3.a
+ where t1.pk=1) as sq
+from t3;
+
+select a from t3
+ where a in (select t2.b from t1,t2 where t2.pk=t3.a and t1.pk=1);
+select a from t3
+ where a in (select t2.b from t1 join t2 on t2.pk=t3.a where t1.pk=1);
+
+drop table t1,t2,t3;
+
--echo # End of 10.2 tests
diff --git a/mysql-test/main/subselect4.result b/mysql-test/main/subselect4.result
index 79c42def277..4452c456022 100644
--- a/mysql-test/main/subselect4.result
+++ b/mysql-test/main/subselect4.result
@@ -2687,6 +2687,42 @@ f
bar
DROP TABLE t1, t2;
#
+# MDEV-23449: alias do not exist and a query do not report an error
+#
+CREATE TABLE t1(a INT, b INT);
+INSERT INTO t1 VALUES (1,1), (2,2), (3,3), (4,4), (5,5);
+SELECT a, b FROM t1 WHERE a IN (SELECT A.a FROM t1 A GROUP BY s.id);
+ERROR 42S22: Unknown column 's.id' in 'group statement'
+DROP TABLE t1;
+#
+# MDEV-24519: Server crashes in Charset::set_charset upon SELECT
+#
+CREATE TABLE t1 (a VARBINARY(8));
+INSERT INTO t1 VALUES ('foo'),('bar');
+CREATE TABLE t2 (b VARBINARY(8));
+EXPLAIN
+SELECT a FROM t1 WHERE (a, a) IN (SELECT 'qux', 'qux') AND a = (SELECT MIN(b) FROM t2);
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
+3 SUBQUERY NULL NULL NULL NULL NULL NULL NULL no matching row in const table
+2 SUBQUERY NULL NULL NULL NULL NULL NULL NULL No tables used
+SELECT a FROM t1 WHERE (a, a) IN (SELECT 'qux', 'qux') AND a = (SELECT MIN(b) FROM t2);
+a
+DROP TABLE t1,t2;
+CREATE TABLE t1 (a INT);
+INSERT INTO t1 VALUES (1),(2);
+CREATE TABLE t2 (b VARBINARY(8));
+EXPLAIN
+SELECT a FROM t1 WHERE (a, a) IN (SELECT 1, 2) AND a = (SELECT MIN(b) FROM t2);
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 2 Using where
+3 SUBQUERY NULL NULL NULL NULL NULL NULL NULL no matching row in const table
+2 DEPENDENT SUBQUERY NULL NULL NULL NULL NULL NULL NULL No tables used
+SELECT a FROM t1 WHERE (a, a) IN (SELECT 1, 2) AND a = (SELECT MIN(b) FROM t2);
+a
+DROP TABLE t1,t2;
+# End of 10.2 tests
+#
# MDEV-18335: Assertion `!error || error == 137' failed in subselect_rowid_merge_engine::init
#
CREATE TABLE t1 (i1 int,v1 varchar(1),KEY (v1,i1));
@@ -2717,7 +2753,6 @@ Warnings:
Warning 1931 Query execution was interrupted. The query examined at least 3020 rows, which exceeds LIMIT ROWS EXAMINED (500). The query result may be incomplete
SET join_cache_level= @save_join_cache_level;
DROP TABLE t1,t2,t3,t4;
-# End of 10.2 tests
#
# MDEV-21265: IN predicate conversion to IN subquery should be allowed for a broader set of datatype comparison
#
@@ -2751,6 +2786,67 @@ id select_type table type possible_keys key key_len ref rows Extra
set names default;
set @@in_predicate_conversion_threshold= @save_in_predicate_conversion_threshold;
DROP TABLE t1,t2;
+#
+# MDEV-24925: Server crashes in Item_subselect::init_expr_cache_tracker
+#
+CREATE TABLE t1 (id INT PRIMARY KEY);
+INSERT INTO t1 VALUES (1),(2);
+SELECT
+1 IN (
+SELECT
+(SELECT COUNT(id)
+FROM t1
+WHERE t1_outer.id <> id
+) AS f
+FROM
+t1 AS t1_outer
+GROUP BY f
+);
+1 IN (
+SELECT
+(SELECT COUNT(id)
+FROM t1
+WHERE t1_outer.id <> id
+) AS f
+FROM
+t1 AS t1_outer
+GROUP BY f
+)
+1
+SELECT
+1 IN (
+SELECT
+(SELECT COUNT(id)
+FROM t1
+WHERE t1_outer.id <> id
+) AS f
+FROM
+t1 AS t1_outer
+GROUP BY 1
+);
+1 IN (
+SELECT
+(SELECT COUNT(id)
+FROM t1
+WHERE t1_outer.id <> id
+) AS f
+FROM
+t1 AS t1_outer
+GROUP BY 1
+)
+1
+DROP TABLE t1;
+#
+# MDEV-24898: Server crashes in st_select_lex::next_select / Item_subselect::is_expensive
+# (Testcase)
+#
+CREATE TABLE t1 (a INT);
+INSERT INTO t1 VALUES (1),(2);
+CREATE TABLE t2 (b INT);
+INSERT INTO t2 VALUES (3),(4);
+SELECT 1 IN (SELECT (SELECT a FROM t1) AS x FROM t2 GROUP BY x);
+ERROR 21000: Subquery returns more than 1 row
+drop table t1,t2;
# End of 10.3 tests
#
# MDEV-19134: EXISTS() slower if ORDER BY is defined
diff --git a/mysql-test/main/subselect4.test b/mysql-test/main/subselect4.test
index 3a9ad7f715c..a2dfc48ffe2 100644
--- a/mysql-test/main/subselect4.test
+++ b/mysql-test/main/subselect4.test
@@ -2202,6 +2202,43 @@ SELECT * FROM t2;
DROP TABLE t1, t2;
--echo #
+--echo # MDEV-23449: alias do not exist and a query do not report an error
+--echo #
+
+CREATE TABLE t1(a INT, b INT);
+INSERT INTO t1 VALUES (1,1), (2,2), (3,3), (4,4), (5,5);
+
+--error ER_BAD_FIELD_ERROR
+SELECT a, b FROM t1 WHERE a IN (SELECT A.a FROM t1 A GROUP BY s.id);
+DROP TABLE t1;
+
+--echo #
+--echo # MDEV-24519: Server crashes in Charset::set_charset upon SELECT
+--echo #
+
+CREATE TABLE t1 (a VARBINARY(8));
+INSERT INTO t1 VALUES ('foo'),('bar');
+CREATE TABLE t2 (b VARBINARY(8));
+
+EXPLAIN
+SELECT a FROM t1 WHERE (a, a) IN (SELECT 'qux', 'qux') AND a = (SELECT MIN(b) FROM t2);
+SELECT a FROM t1 WHERE (a, a) IN (SELECT 'qux', 'qux') AND a = (SELECT MIN(b) FROM t2);
+
+DROP TABLE t1,t2;
+
+CREATE TABLE t1 (a INT);
+INSERT INTO t1 VALUES (1),(2);
+CREATE TABLE t2 (b VARBINARY(8));
+
+EXPLAIN
+SELECT a FROM t1 WHERE (a, a) IN (SELECT 1, 2) AND a = (SELECT MIN(b) FROM t2);
+SELECT a FROM t1 WHERE (a, a) IN (SELECT 1, 2) AND a = (SELECT MIN(b) FROM t2);
+
+DROP TABLE t1,t2;
+
+--echo # End of 10.2 tests
+
+--echo #
--echo # MDEV-18335: Assertion `!error || error == 137' failed in subselect_rowid_merge_engine::init
--echo #
@@ -2236,9 +2273,6 @@ from t2 join t1 on
SET join_cache_level= @save_join_cache_level;
DROP TABLE t1,t2,t3,t4;
-
---echo # End of 10.2 tests
-
--echo #
--echo # MDEV-21265: IN predicate conversion to IN subquery should be allowed for a broader set of datatype comparison
--echo #
@@ -2274,6 +2308,51 @@ set names default;
set @@in_predicate_conversion_threshold= @save_in_predicate_conversion_threshold;
DROP TABLE t1,t2;
+--echo #
+--echo # MDEV-24925: Server crashes in Item_subselect::init_expr_cache_tracker
+--echo #
+CREATE TABLE t1 (id INT PRIMARY KEY);
+INSERT INTO t1 VALUES (1),(2);
+
+SELECT
+ 1 IN (
+ SELECT
+ (SELECT COUNT(id)
+ FROM t1
+ WHERE t1_outer.id <> id
+ ) AS f
+ FROM
+ t1 AS t1_outer
+ GROUP BY f
+ );
+
+SELECT
+ 1 IN (
+ SELECT
+ (SELECT COUNT(id)
+ FROM t1
+ WHERE t1_outer.id <> id
+ ) AS f
+ FROM
+ t1 AS t1_outer
+ GROUP BY 1
+ );
+
+DROP TABLE t1;
+
+--echo #
+--echo # MDEV-24898: Server crashes in st_select_lex::next_select / Item_subselect::is_expensive
+--echo # (Testcase)
+--echo #
+CREATE TABLE t1 (a INT);
+INSERT INTO t1 VALUES (1),(2); # Optional, fails either way
+CREATE TABLE t2 (b INT);
+INSERT INTO t2 VALUES (3),(4); # Optional, fails either way
+
+--error ER_SUBQUERY_NO_1_ROW
+SELECT 1 IN (SELECT (SELECT a FROM t1) AS x FROM t2 GROUP BY x);
+drop table t1,t2;
+
--echo # End of 10.3 tests
--echo #
diff --git a/mysql-test/main/subselect_exists2in.result b/mysql-test/main/subselect_exists2in.result
index e8ef7081b09..6ff518b5a29 100644
--- a/mysql-test/main/subselect_exists2in.result
+++ b/mysql-test/main/subselect_exists2in.result
@@ -1102,4 +1102,22 @@ U5.`storage_target_id` = V0.`id`
);
id
drop table t1,t2,t3;
+#
+# MDEV-25407: EXISTS subquery with correlation in ON expression crashes
+#
+create table t10(a int primary key);
+insert into t10 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+create table t11(a int primary key);
+insert into t11 select a.a + b.a* 10 + c.a * 100 from t10 a, t10 b, t10 c;
+create table t1 (a int, b int);
+insert into t1 select a,a from t10;
+create table t2 (a int, b int);
+insert into t2 select a,a from t11;
+create table t3 as select * from t2;
+explain select * from t1 where exists (select t2.a from t2 left join t3 on (t3.b=t1.b) where t2.a=t1.a);
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 10
+1 PRIMARY t2 ALL NULL NULL NULL NULL 1000 Using where; Start temporary; Using join buffer (flat, BNL join)
+1 PRIMARY t3 ALL NULL NULL NULL NULL 1000 Using where; End temporary; Using join buffer (incremental, BNL join)
+drop table t1, t2, t3, t10, t11;
set optimizer_switch=default;
diff --git a/mysql-test/main/subselect_exists2in.test b/mysql-test/main/subselect_exists2in.test
index e27ce57038b..e70d643138b 100644
--- a/mysql-test/main/subselect_exists2in.test
+++ b/mysql-test/main/subselect_exists2in.test
@@ -941,5 +941,28 @@ WHERE (
drop table t1,t2,t3;
+--echo #
+--echo # MDEV-25407: EXISTS subquery with correlation in ON expression crashes
+--echo #
+create table t10(a int primary key);
+insert into t10 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+
+create table t11(a int primary key);
+insert into t11 select a.a + b.a* 10 + c.a * 100 from t10 a, t10 b, t10 c;
+
+create table t1 (a int, b int);
+insert into t1 select a,a from t10;
+
+create table t2 (a int, b int);
+insert into t2 select a,a from t11;
+
+create table t3 as select * from t2;
+
+
+explain select * from t1 where exists (select t2.a from t2 left join t3 on (t3.b=t1.b) where t2.a=t1.a);
+
+drop table t1, t2, t3, t10, t11;
+
+
#restore defaults
set optimizer_switch=default;
diff --git a/mysql-test/main/subselect_mat_cost_bugs.result b/mysql-test/main/subselect_mat_cost_bugs.result
index a18c5e608f1..ecceac27b2d 100644
--- a/mysql-test/main/subselect_mat_cost_bugs.result
+++ b/mysql-test/main/subselect_mat_cost_bugs.result
@@ -189,8 +189,8 @@ SELECT alias2.f2 AS field1
FROM t1 AS alias1 JOIN ( SELECT * FROM t2 ) AS alias2 ON alias2.f3 = alias1.f1
WHERE (
SELECT t2.f2
-FROM t2 JOIN t1 ON t1.f1
-WHERE t1.f1 AND alias2.f10
+FROM t2 JOIN t1 ON (t1.f1>0 or t1.f1<0)
+WHERE (t1.f1>0 or t1.f1<0) AND alias2.f10
)
ORDER BY field1 ;
id select_type table type possible_keys key key_len ref rows Extra
@@ -203,8 +203,8 @@ SELECT alias2.f2 AS field1
FROM t1 AS alias1 JOIN ( SELECT * FROM t2 ) AS alias2 ON alias2.f3 = alias1.f1
WHERE (
SELECT t2.f2
-FROM t2 JOIN t1 ON t1.f1
-WHERE t1.f1 AND alias2.f10
+FROM t2 JOIN t1 ON (t1.f1>0 or t1.f1<0)
+WHERE (t1.f1>0 or t1.f1<0) AND alias2.f10
)
ORDER BY field1 ;
field1
diff --git a/mysql-test/main/subselect_mat_cost_bugs.test b/mysql-test/main/subselect_mat_cost_bugs.test
index 028cdced560..ba1aad06a15 100644
--- a/mysql-test/main/subselect_mat_cost_bugs.test
+++ b/mysql-test/main/subselect_mat_cost_bugs.test
@@ -217,8 +217,8 @@ SELECT alias2.f2 AS field1
FROM t1 AS alias1 JOIN ( SELECT * FROM t2 ) AS alias2 ON alias2.f3 = alias1.f1
WHERE (
SELECT t2.f2
- FROM t2 JOIN t1 ON t1.f1
- WHERE t1.f1 AND alias2.f10
+ FROM t2 JOIN t1 ON (t1.f1>0 or t1.f1<0)
+ WHERE (t1.f1>0 or t1.f1<0) AND alias2.f10
)
ORDER BY field1 ;
@@ -226,8 +226,8 @@ SELECT alias2.f2 AS field1
FROM t1 AS alias1 JOIN ( SELECT * FROM t2 ) AS alias2 ON alias2.f3 = alias1.f1
WHERE (
SELECT t2.f2
- FROM t2 JOIN t1 ON t1.f1
- WHERE t1.f1 AND alias2.f10
+ FROM t2 JOIN t1 ON (t1.f1>0 or t1.f1<0)
+ WHERE (t1.f1>0 or t1.f1<0) AND alias2.f10
)
ORDER BY field1 ;
diff --git a/mysql-test/main/subselect_no_exists_to_in.result b/mysql-test/main/subselect_no_exists_to_in.result
index 84c415d1ce1..b1432cf0979 100644
--- a/mysql-test/main/subselect_no_exists_to_in.result
+++ b/mysql-test/main/subselect_no_exists_to_in.result
@@ -7344,6 +7344,44 @@ WHERE (t2.i, t2.pk) NOT IN ( SELECT t3.i, t3.i FROM t t3, t t4 ) AND t1.c = 'foo
pk i c pk i c
1 10 foo 1 10 foo
DROP TABLE t;
+#
+# MDEV-25002: Outer reference in ON clause of subselect
+#
+create table t1 (
+pk int primary key,
+a int
+) engine=myisam;
+insert into t1 values (1,1), (2,2);
+create table t2 (
+pk int primary key,
+b int
+) engine=myisam;
+insert into t2 values (1,1), (2,3);
+create table t3 (a int);
+insert into t3 values (1),(2);
+select a,
+(select count(*) from t1, t2
+where t2.pk=t3.a and t1.pk=1) as sq
+from t3;
+a sq
+1 1
+2 1
+select a,
+(select count(*) from t1 join t2 on t2.pk=t3.a
+where t1.pk=1) as sq
+from t3;
+a sq
+1 1
+2 1
+select a from t3
+where a in (select t2.b from t1,t2 where t2.pk=t3.a and t1.pk=1);
+a
+1
+select a from t3
+where a in (select t2.b from t1 join t2 on t2.pk=t3.a where t1.pk=1);
+a
+1
+drop table t1,t2,t3;
# End of 10.2 tests
#
# Start of 10.4 tests
diff --git a/mysql-test/main/subselect_no_mat.result b/mysql-test/main/subselect_no_mat.result
index 93035e235f7..da60f98bf00 100644
--- a/mysql-test/main/subselect_no_mat.result
+++ b/mysql-test/main/subselect_no_mat.result
@@ -7337,6 +7337,44 @@ WHERE (t2.i, t2.pk) NOT IN ( SELECT t3.i, t3.i FROM t t3, t t4 ) AND t1.c = 'foo
pk i c pk i c
1 10 foo 1 10 foo
DROP TABLE t;
+#
+# MDEV-25002: Outer reference in ON clause of subselect
+#
+create table t1 (
+pk int primary key,
+a int
+) engine=myisam;
+insert into t1 values (1,1), (2,2);
+create table t2 (
+pk int primary key,
+b int
+) engine=myisam;
+insert into t2 values (1,1), (2,3);
+create table t3 (a int);
+insert into t3 values (1),(2);
+select a,
+(select count(*) from t1, t2
+where t2.pk=t3.a and t1.pk=1) as sq
+from t3;
+a sq
+1 1
+2 1
+select a,
+(select count(*) from t1 join t2 on t2.pk=t3.a
+where t1.pk=1) as sq
+from t3;
+a sq
+1 1
+2 1
+select a from t3
+where a in (select t2.b from t1,t2 where t2.pk=t3.a and t1.pk=1);
+a
+1
+select a from t3
+where a in (select t2.b from t1 join t2 on t2.pk=t3.a where t1.pk=1);
+a
+1
+drop table t1,t2,t3;
# End of 10.2 tests
#
# Start of 10.4 tests
diff --git a/mysql-test/main/subselect_no_opts.result b/mysql-test/main/subselect_no_opts.result
index 09f664d3c28..fb99e237a1c 100644
--- a/mysql-test/main/subselect_no_opts.result
+++ b/mysql-test/main/subselect_no_opts.result
@@ -7335,6 +7335,44 @@ WHERE (t2.i, t2.pk) NOT IN ( SELECT t3.i, t3.i FROM t t3, t t4 ) AND t1.c = 'foo
pk i c pk i c
1 10 foo 1 10 foo
DROP TABLE t;
+#
+# MDEV-25002: Outer reference in ON clause of subselect
+#
+create table t1 (
+pk int primary key,
+a int
+) engine=myisam;
+insert into t1 values (1,1), (2,2);
+create table t2 (
+pk int primary key,
+b int
+) engine=myisam;
+insert into t2 values (1,1), (2,3);
+create table t3 (a int);
+insert into t3 values (1),(2);
+select a,
+(select count(*) from t1, t2
+where t2.pk=t3.a and t1.pk=1) as sq
+from t3;
+a sq
+1 1
+2 1
+select a,
+(select count(*) from t1 join t2 on t2.pk=t3.a
+where t1.pk=1) as sq
+from t3;
+a sq
+1 1
+2 1
+select a from t3
+where a in (select t2.b from t1,t2 where t2.pk=t3.a and t1.pk=1);
+a
+1
+select a from t3
+where a in (select t2.b from t1 join t2 on t2.pk=t3.a where t1.pk=1);
+a
+1
+drop table t1,t2,t3;
# End of 10.2 tests
#
# Start of 10.4 tests
diff --git a/mysql-test/main/subselect_no_scache.result b/mysql-test/main/subselect_no_scache.result
index 765bb15a3df..4b910009248 100644
--- a/mysql-test/main/subselect_no_scache.result
+++ b/mysql-test/main/subselect_no_scache.result
@@ -7350,6 +7350,44 @@ WHERE (t2.i, t2.pk) NOT IN ( SELECT t3.i, t3.i FROM t t3, t t4 ) AND t1.c = 'foo
pk i c pk i c
1 10 foo 1 10 foo
DROP TABLE t;
+#
+# MDEV-25002: Outer reference in ON clause of subselect
+#
+create table t1 (
+pk int primary key,
+a int
+) engine=myisam;
+insert into t1 values (1,1), (2,2);
+create table t2 (
+pk int primary key,
+b int
+) engine=myisam;
+insert into t2 values (1,1), (2,3);
+create table t3 (a int);
+insert into t3 values (1),(2);
+select a,
+(select count(*) from t1, t2
+where t2.pk=t3.a and t1.pk=1) as sq
+from t3;
+a sq
+1 1
+2 1
+select a,
+(select count(*) from t1 join t2 on t2.pk=t3.a
+where t1.pk=1) as sq
+from t3;
+a sq
+1 1
+2 1
+select a from t3
+where a in (select t2.b from t1,t2 where t2.pk=t3.a and t1.pk=1);
+a
+1
+select a from t3
+where a in (select t2.b from t1 join t2 on t2.pk=t3.a where t1.pk=1);
+a
+1
+drop table t1,t2,t3;
# End of 10.2 tests
#
# Start of 10.4 tests
diff --git a/mysql-test/main/subselect_no_semijoin.result b/mysql-test/main/subselect_no_semijoin.result
index 97d2f3b058f..f2230718754 100644
--- a/mysql-test/main/subselect_no_semijoin.result
+++ b/mysql-test/main/subselect_no_semijoin.result
@@ -7335,6 +7335,44 @@ WHERE (t2.i, t2.pk) NOT IN ( SELECT t3.i, t3.i FROM t t3, t t4 ) AND t1.c = 'foo
pk i c pk i c
1 10 foo 1 10 foo
DROP TABLE t;
+#
+# MDEV-25002: Outer reference in ON clause of subselect
+#
+create table t1 (
+pk int primary key,
+a int
+) engine=myisam;
+insert into t1 values (1,1), (2,2);
+create table t2 (
+pk int primary key,
+b int
+) engine=myisam;
+insert into t2 values (1,1), (2,3);
+create table t3 (a int);
+insert into t3 values (1),(2);
+select a,
+(select count(*) from t1, t2
+where t2.pk=t3.a and t1.pk=1) as sq
+from t3;
+a sq
+1 1
+2 1
+select a,
+(select count(*) from t1 join t2 on t2.pk=t3.a
+where t1.pk=1) as sq
+from t3;
+a sq
+1 1
+2 1
+select a from t3
+where a in (select t2.b from t1,t2 where t2.pk=t3.a and t1.pk=1);
+a
+1
+select a from t3
+where a in (select t2.b from t1 join t2 on t2.pk=t3.a where t1.pk=1);
+a
+1
+drop table t1,t2,t3;
# End of 10.2 tests
#
# Start of 10.4 tests
diff --git a/mysql-test/main/system_mysql_db.result b/mysql-test/main/system_mysql_db.result
index 838591b3b4b..37ade694489 100644
--- a/mysql-test/main/system_mysql_db.result
+++ b/mysql-test/main/system_mysql_db.result
@@ -62,7 +62,7 @@ db CREATE TABLE `db` (
) ENGINE=Aria DEFAULT CHARSET=utf8 COLLATE=utf8_bin PAGE_CHECKSUM=1 TRANSACTIONAL=1 COMMENT='Database privileges'
show create table user;
View Create View character_set_client collation_connection
-user CREATE ALGORITHM=UNDEFINED DEFINER=`mariadb.sys`@`localhost` SQL SECURITY DEFINER VIEW `user` AS select `global_priv`.`Host` AS `Host`,`global_priv`.`User` AS `User`,if(json_value(`global_priv`.`Priv`,'$.plugin') in ('mysql_native_password','mysql_old_password'),ifnull(json_value(`global_priv`.`Priv`,'$.authentication_string'),''),'') AS `Password`,if(json_value(`global_priv`.`Priv`,'$.access') & 1,'Y','N') AS `Select_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 2,'Y','N') AS `Insert_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 4,'Y','N') AS `Update_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 8,'Y','N') AS `Delete_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 16,'Y','N') AS `Create_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 32,'Y','N') AS `Drop_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 64,'Y','N') AS `Reload_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 128,'Y','N') AS `Shutdown_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 256,'Y','N') AS `Process_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 512,'Y','N') AS `File_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 1024,'Y','N') AS `Grant_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 2048,'Y','N') AS `References_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 4096,'Y','N') AS `Index_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 8192,'Y','N') AS `Alter_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 16384,'Y','N') AS `Show_db_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 32768,'Y','N') AS `Super_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 65536,'Y','N') AS `Create_tmp_table_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 131072,'Y','N') AS `Lock_tables_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 262144,'Y','N') AS `Execute_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 524288,'Y','N') AS `Repl_slave_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 1048576,'Y','N') AS `Repl_client_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 2097152,'Y','N') AS `Create_view_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 4194304,'Y','N') AS `Show_view_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 8388608,'Y','N') AS `Create_routine_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 16777216,'Y','N') AS `Alter_routine_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 33554432,'Y','N') AS `Create_user_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 67108864,'Y','N') AS `Event_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 134217728,'Y','N') AS `Trigger_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 268435456,'Y','N') AS `Create_tablespace_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 536870912,'Y','N') AS `Delete_history_priv`,elt(ifnull(json_value(`global_priv`.`Priv`,'$.ssl_type'),0) + 1,'','ANY','X509','SPECIFIED') AS `ssl_type`,ifnull(json_value(`global_priv`.`Priv`,'$.ssl_cipher'),'') AS `ssl_cipher`,ifnull(json_value(`global_priv`.`Priv`,'$.x509_issuer'),'') AS `x509_issuer`,ifnull(json_value(`global_priv`.`Priv`,'$.x509_subject'),'') AS `x509_subject`,cast(ifnull(json_value(`global_priv`.`Priv`,'$.max_questions'),0) as unsigned) AS `max_questions`,cast(ifnull(json_value(`global_priv`.`Priv`,'$.max_updates'),0) as unsigned) AS `max_updates`,cast(ifnull(json_value(`global_priv`.`Priv`,'$.max_connections'),0) as unsigned) AS 
`max_connections`,cast(ifnull(json_value(`global_priv`.`Priv`,'$.max_user_connections'),0) as signed) AS `max_user_connections`,ifnull(json_value(`global_priv`.`Priv`,'$.plugin'),'') AS `plugin`,ifnull(json_value(`global_priv`.`Priv`,'$.authentication_string'),'') AS `authentication_string`,'N' AS `password_expired`,elt(ifnull(json_value(`global_priv`.`Priv`,'$.is_role'),0) + 1,'N','Y') AS `is_role`,ifnull(json_value(`global_priv`.`Priv`,'$.default_role'),'') AS `default_role`,cast(ifnull(json_value(`global_priv`.`Priv`,'$.max_statement_time'),0.0) as decimal(12,6)) AS `max_statement_time` from `global_priv` latin1 latin1_swedish_ci
+user CREATE ALGORITHM=UNDEFINED DEFINER=`mariadb.sys`@`localhost` SQL SECURITY DEFINER VIEW `user` AS select `global_priv`.`Host` AS `Host`,`global_priv`.`User` AS `User`,if(json_value(`global_priv`.`Priv`,'$.plugin') in ('mysql_native_password','mysql_old_password'),ifnull(json_value(`global_priv`.`Priv`,'$.authentication_string'),''),'') AS `Password`,if(json_value(`global_priv`.`Priv`,'$.access') & 1,'Y','N') AS `Select_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 2,'Y','N') AS `Insert_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 4,'Y','N') AS `Update_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 8,'Y','N') AS `Delete_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 16,'Y','N') AS `Create_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 32,'Y','N') AS `Drop_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 64,'Y','N') AS `Reload_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 128,'Y','N') AS `Shutdown_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 256,'Y','N') AS `Process_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 512,'Y','N') AS `File_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 1024,'Y','N') AS `Grant_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 2048,'Y','N') AS `References_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 4096,'Y','N') AS `Index_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 8192,'Y','N') AS `Alter_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 16384,'Y','N') AS `Show_db_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 32768,'Y','N') AS `Super_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 65536,'Y','N') AS `Create_tmp_table_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 131072,'Y','N') AS `Lock_tables_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 262144,'Y','N') AS `Execute_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 524288,'Y','N') AS `Repl_slave_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 1048576,'Y','N') AS `Repl_client_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 2097152,'Y','N') AS `Create_view_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 4194304,'Y','N') AS `Show_view_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 8388608,'Y','N') AS `Create_routine_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 16777216,'Y','N') AS `Alter_routine_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 33554432,'Y','N') AS `Create_user_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 67108864,'Y','N') AS `Event_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 134217728,'Y','N') AS `Trigger_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 268435456,'Y','N') AS `Create_tablespace_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 536870912,'Y','N') AS `Delete_history_priv`,elt(ifnull(json_value(`global_priv`.`Priv`,'$.ssl_type'),0) + 1,'','ANY','X509','SPECIFIED') AS `ssl_type`,ifnull(json_value(`global_priv`.`Priv`,'$.ssl_cipher'),'') AS `ssl_cipher`,ifnull(json_value(`global_priv`.`Priv`,'$.x509_issuer'),'') AS `x509_issuer`,ifnull(json_value(`global_priv`.`Priv`,'$.x509_subject'),'') AS `x509_subject`,cast(ifnull(json_value(`global_priv`.`Priv`,'$.max_questions'),0) as unsigned) AS `max_questions`,cast(ifnull(json_value(`global_priv`.`Priv`,'$.max_updates'),0) as unsigned) AS `max_updates`,cast(ifnull(json_value(`global_priv`.`Priv`,'$.max_connections'),0) as unsigned) AS 
`max_connections`,cast(ifnull(json_value(`global_priv`.`Priv`,'$.max_user_connections'),0) as signed) AS `max_user_connections`,ifnull(json_value(`global_priv`.`Priv`,'$.plugin'),'') AS `plugin`,ifnull(json_value(`global_priv`.`Priv`,'$.authentication_string'),'') AS `authentication_string`,if(ifnull(json_value(`global_priv`.`Priv`,'$.password_last_changed'),1) = 0,'Y','N') AS `password_expired`,elt(ifnull(json_value(`global_priv`.`Priv`,'$.is_role'),0) + 1,'N','Y') AS `is_role`,ifnull(json_value(`global_priv`.`Priv`,'$.default_role'),'') AS `default_role`,cast(ifnull(json_value(`global_priv`.`Priv`,'$.max_statement_time'),0.0) as decimal(12,6)) AS `max_statement_time` from `global_priv` latin1 latin1_swedish_ci
show create table func;
Table Create Table
func CREATE TABLE `func` (
diff --git a/mysql-test/main/system_mysql_db_507.result b/mysql-test/main/system_mysql_db_507.result
index 2d68dc82529..8069405aa3a 100644
--- a/mysql-test/main/system_mysql_db_507.result
+++ b/mysql-test/main/system_mysql_db_507.result
@@ -214,6 +214,7 @@ alter user user@localhost password expire;
show create user user@localhost;
CREATE USER for user@localhost
CREATE USER `user`@`localhost` PASSWORD EXPIRE
+ALTER USER `user`@`localhost` PASSWORD EXPIRE INTERVAL 123 DAY
set password for user@localhost= password('');
show create user user@localhost;
CREATE USER for user@localhost
diff --git a/mysql-test/main/system_mysql_db_fix40123.result b/mysql-test/main/system_mysql_db_fix40123.result
index fc9a5763dbd..f76a4b37e31 100644
--- a/mysql-test/main/system_mysql_db_fix40123.result
+++ b/mysql-test/main/system_mysql_db_fix40123.result
@@ -100,7 +100,7 @@ db CREATE TABLE `db` (
) ENGINE=Aria DEFAULT CHARSET=utf8 COLLATE=utf8_bin PAGE_CHECKSUM=1 TRANSACTIONAL=1 COMMENT='Database privileges'
show create table user;
View Create View character_set_client collation_connection
-user CREATE ALGORITHM=UNDEFINED DEFINER=`mariadb.sys`@`localhost` SQL SECURITY DEFINER VIEW `user` AS select `global_priv`.`Host` AS `Host`,`global_priv`.`User` AS `User`,if(json_value(`global_priv`.`Priv`,'$.plugin') in ('mysql_native_password','mysql_old_password'),ifnull(json_value(`global_priv`.`Priv`,'$.authentication_string'),''),'') AS `Password`,if(json_value(`global_priv`.`Priv`,'$.access') & 1,'Y','N') AS `Select_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 2,'Y','N') AS `Insert_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 4,'Y','N') AS `Update_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 8,'Y','N') AS `Delete_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 16,'Y','N') AS `Create_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 32,'Y','N') AS `Drop_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 64,'Y','N') AS `Reload_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 128,'Y','N') AS `Shutdown_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 256,'Y','N') AS `Process_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 512,'Y','N') AS `File_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 1024,'Y','N') AS `Grant_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 2048,'Y','N') AS `References_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 4096,'Y','N') AS `Index_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 8192,'Y','N') AS `Alter_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 16384,'Y','N') AS `Show_db_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 32768,'Y','N') AS `Super_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 65536,'Y','N') AS `Create_tmp_table_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 131072,'Y','N') AS `Lock_tables_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 262144,'Y','N') AS `Execute_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 524288,'Y','N') AS `Repl_slave_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 1048576,'Y','N') AS `Repl_client_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 2097152,'Y','N') AS `Create_view_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 4194304,'Y','N') AS `Show_view_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 8388608,'Y','N') AS `Create_routine_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 16777216,'Y','N') AS `Alter_routine_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 33554432,'Y','N') AS `Create_user_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 67108864,'Y','N') AS `Event_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 134217728,'Y','N') AS `Trigger_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 268435456,'Y','N') AS `Create_tablespace_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 536870912,'Y','N') AS `Delete_history_priv`,elt(ifnull(json_value(`global_priv`.`Priv`,'$.ssl_type'),0) + 1,'','ANY','X509','SPECIFIED') AS `ssl_type`,ifnull(json_value(`global_priv`.`Priv`,'$.ssl_cipher'),'') AS `ssl_cipher`,ifnull(json_value(`global_priv`.`Priv`,'$.x509_issuer'),'') AS `x509_issuer`,ifnull(json_value(`global_priv`.`Priv`,'$.x509_subject'),'') AS `x509_subject`,cast(ifnull(json_value(`global_priv`.`Priv`,'$.max_questions'),0) as unsigned) AS `max_questions`,cast(ifnull(json_value(`global_priv`.`Priv`,'$.max_updates'),0) as unsigned) AS `max_updates`,cast(ifnull(json_value(`global_priv`.`Priv`,'$.max_connections'),0) as unsigned) AS 
`max_connections`,cast(ifnull(json_value(`global_priv`.`Priv`,'$.max_user_connections'),0) as signed) AS `max_user_connections`,ifnull(json_value(`global_priv`.`Priv`,'$.plugin'),'') AS `plugin`,ifnull(json_value(`global_priv`.`Priv`,'$.authentication_string'),'') AS `authentication_string`,'N' AS `password_expired`,elt(ifnull(json_value(`global_priv`.`Priv`,'$.is_role'),0) + 1,'N','Y') AS `is_role`,ifnull(json_value(`global_priv`.`Priv`,'$.default_role'),'') AS `default_role`,cast(ifnull(json_value(`global_priv`.`Priv`,'$.max_statement_time'),0.0) as decimal(12,6)) AS `max_statement_time` from `global_priv` latin1 latin1_swedish_ci
+user CREATE ALGORITHM=UNDEFINED DEFINER=`mariadb.sys`@`localhost` SQL SECURITY DEFINER VIEW `user` AS select `global_priv`.`Host` AS `Host`,`global_priv`.`User` AS `User`,if(json_value(`global_priv`.`Priv`,'$.plugin') in ('mysql_native_password','mysql_old_password'),ifnull(json_value(`global_priv`.`Priv`,'$.authentication_string'),''),'') AS `Password`,if(json_value(`global_priv`.`Priv`,'$.access') & 1,'Y','N') AS `Select_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 2,'Y','N') AS `Insert_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 4,'Y','N') AS `Update_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 8,'Y','N') AS `Delete_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 16,'Y','N') AS `Create_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 32,'Y','N') AS `Drop_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 64,'Y','N') AS `Reload_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 128,'Y','N') AS `Shutdown_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 256,'Y','N') AS `Process_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 512,'Y','N') AS `File_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 1024,'Y','N') AS `Grant_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 2048,'Y','N') AS `References_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 4096,'Y','N') AS `Index_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 8192,'Y','N') AS `Alter_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 16384,'Y','N') AS `Show_db_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 32768,'Y','N') AS `Super_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 65536,'Y','N') AS `Create_tmp_table_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 131072,'Y','N') AS `Lock_tables_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 262144,'Y','N') AS `Execute_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 524288,'Y','N') AS `Repl_slave_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 1048576,'Y','N') AS `Repl_client_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 2097152,'Y','N') AS `Create_view_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 4194304,'Y','N') AS `Show_view_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 8388608,'Y','N') AS `Create_routine_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 16777216,'Y','N') AS `Alter_routine_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 33554432,'Y','N') AS `Create_user_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 67108864,'Y','N') AS `Event_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 134217728,'Y','N') AS `Trigger_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 268435456,'Y','N') AS `Create_tablespace_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 536870912,'Y','N') AS `Delete_history_priv`,elt(ifnull(json_value(`global_priv`.`Priv`,'$.ssl_type'),0) + 1,'','ANY','X509','SPECIFIED') AS `ssl_type`,ifnull(json_value(`global_priv`.`Priv`,'$.ssl_cipher'),'') AS `ssl_cipher`,ifnull(json_value(`global_priv`.`Priv`,'$.x509_issuer'),'') AS `x509_issuer`,ifnull(json_value(`global_priv`.`Priv`,'$.x509_subject'),'') AS `x509_subject`,cast(ifnull(json_value(`global_priv`.`Priv`,'$.max_questions'),0) as unsigned) AS `max_questions`,cast(ifnull(json_value(`global_priv`.`Priv`,'$.max_updates'),0) as unsigned) AS `max_updates`,cast(ifnull(json_value(`global_priv`.`Priv`,'$.max_connections'),0) as unsigned) AS 
`max_connections`,cast(ifnull(json_value(`global_priv`.`Priv`,'$.max_user_connections'),0) as signed) AS `max_user_connections`,ifnull(json_value(`global_priv`.`Priv`,'$.plugin'),'') AS `plugin`,ifnull(json_value(`global_priv`.`Priv`,'$.authentication_string'),'') AS `authentication_string`,if(ifnull(json_value(`global_priv`.`Priv`,'$.password_last_changed'),1) = 0,'Y','N') AS `password_expired`,elt(ifnull(json_value(`global_priv`.`Priv`,'$.is_role'),0) + 1,'N','Y') AS `is_role`,ifnull(json_value(`global_priv`.`Priv`,'$.default_role'),'') AS `default_role`,cast(ifnull(json_value(`global_priv`.`Priv`,'$.max_statement_time'),0.0) as decimal(12,6)) AS `max_statement_time` from `global_priv` latin1 latin1_swedish_ci
show create table func;
Table Create Table
func CREATE TABLE `func` (
diff --git a/mysql-test/main/system_mysql_db_fix50030.result b/mysql-test/main/system_mysql_db_fix50030.result
index 02047dc4a8c..a9ef62aa5d4 100644
--- a/mysql-test/main/system_mysql_db_fix50030.result
+++ b/mysql-test/main/system_mysql_db_fix50030.result
@@ -104,7 +104,7 @@ db CREATE TABLE `db` (
) ENGINE=Aria DEFAULT CHARSET=utf8 COLLATE=utf8_bin PAGE_CHECKSUM=1 TRANSACTIONAL=1 COMMENT='Database privileges'
show create table user;
View Create View character_set_client collation_connection
-user CREATE ALGORITHM=UNDEFINED DEFINER=`mariadb.sys`@`localhost` SQL SECURITY DEFINER VIEW `user` AS select `global_priv`.`Host` AS `Host`,`global_priv`.`User` AS `User`,if(json_value(`global_priv`.`Priv`,'$.plugin') in ('mysql_native_password','mysql_old_password'),ifnull(json_value(`global_priv`.`Priv`,'$.authentication_string'),''),'') AS `Password`,if(json_value(`global_priv`.`Priv`,'$.access') & 1,'Y','N') AS `Select_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 2,'Y','N') AS `Insert_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 4,'Y','N') AS `Update_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 8,'Y','N') AS `Delete_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 16,'Y','N') AS `Create_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 32,'Y','N') AS `Drop_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 64,'Y','N') AS `Reload_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 128,'Y','N') AS `Shutdown_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 256,'Y','N') AS `Process_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 512,'Y','N') AS `File_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 1024,'Y','N') AS `Grant_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 2048,'Y','N') AS `References_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 4096,'Y','N') AS `Index_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 8192,'Y','N') AS `Alter_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 16384,'Y','N') AS `Show_db_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 32768,'Y','N') AS `Super_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 65536,'Y','N') AS `Create_tmp_table_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 131072,'Y','N') AS `Lock_tables_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 262144,'Y','N') AS `Execute_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 524288,'Y','N') AS `Repl_slave_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 1048576,'Y','N') AS `Repl_client_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 2097152,'Y','N') AS `Create_view_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 4194304,'Y','N') AS `Show_view_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 8388608,'Y','N') AS `Create_routine_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 16777216,'Y','N') AS `Alter_routine_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 33554432,'Y','N') AS `Create_user_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 67108864,'Y','N') AS `Event_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 134217728,'Y','N') AS `Trigger_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 268435456,'Y','N') AS `Create_tablespace_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 536870912,'Y','N') AS `Delete_history_priv`,elt(ifnull(json_value(`global_priv`.`Priv`,'$.ssl_type'),0) + 1,'','ANY','X509','SPECIFIED') AS `ssl_type`,ifnull(json_value(`global_priv`.`Priv`,'$.ssl_cipher'),'') AS `ssl_cipher`,ifnull(json_value(`global_priv`.`Priv`,'$.x509_issuer'),'') AS `x509_issuer`,ifnull(json_value(`global_priv`.`Priv`,'$.x509_subject'),'') AS `x509_subject`,cast(ifnull(json_value(`global_priv`.`Priv`,'$.max_questions'),0) as unsigned) AS `max_questions`,cast(ifnull(json_value(`global_priv`.`Priv`,'$.max_updates'),0) as unsigned) AS `max_updates`,cast(ifnull(json_value(`global_priv`.`Priv`,'$.max_connections'),0) as unsigned) AS 
`max_connections`,cast(ifnull(json_value(`global_priv`.`Priv`,'$.max_user_connections'),0) as signed) AS `max_user_connections`,ifnull(json_value(`global_priv`.`Priv`,'$.plugin'),'') AS `plugin`,ifnull(json_value(`global_priv`.`Priv`,'$.authentication_string'),'') AS `authentication_string`,'N' AS `password_expired`,elt(ifnull(json_value(`global_priv`.`Priv`,'$.is_role'),0) + 1,'N','Y') AS `is_role`,ifnull(json_value(`global_priv`.`Priv`,'$.default_role'),'') AS `default_role`,cast(ifnull(json_value(`global_priv`.`Priv`,'$.max_statement_time'),0.0) as decimal(12,6)) AS `max_statement_time` from `global_priv` latin1 latin1_swedish_ci
+user CREATE ALGORITHM=UNDEFINED DEFINER=`mariadb.sys`@`localhost` SQL SECURITY DEFINER VIEW `user` AS select `global_priv`.`Host` AS `Host`,`global_priv`.`User` AS `User`,if(json_value(`global_priv`.`Priv`,'$.plugin') in ('mysql_native_password','mysql_old_password'),ifnull(json_value(`global_priv`.`Priv`,'$.authentication_string'),''),'') AS `Password`,if(json_value(`global_priv`.`Priv`,'$.access') & 1,'Y','N') AS `Select_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 2,'Y','N') AS `Insert_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 4,'Y','N') AS `Update_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 8,'Y','N') AS `Delete_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 16,'Y','N') AS `Create_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 32,'Y','N') AS `Drop_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 64,'Y','N') AS `Reload_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 128,'Y','N') AS `Shutdown_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 256,'Y','N') AS `Process_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 512,'Y','N') AS `File_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 1024,'Y','N') AS `Grant_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 2048,'Y','N') AS `References_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 4096,'Y','N') AS `Index_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 8192,'Y','N') AS `Alter_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 16384,'Y','N') AS `Show_db_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 32768,'Y','N') AS `Super_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 65536,'Y','N') AS `Create_tmp_table_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 131072,'Y','N') AS `Lock_tables_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 262144,'Y','N') AS `Execute_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 524288,'Y','N') AS `Repl_slave_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 1048576,'Y','N') AS `Repl_client_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 2097152,'Y','N') AS `Create_view_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 4194304,'Y','N') AS `Show_view_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 8388608,'Y','N') AS `Create_routine_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 16777216,'Y','N') AS `Alter_routine_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 33554432,'Y','N') AS `Create_user_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 67108864,'Y','N') AS `Event_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 134217728,'Y','N') AS `Trigger_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 268435456,'Y','N') AS `Create_tablespace_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 536870912,'Y','N') AS `Delete_history_priv`,elt(ifnull(json_value(`global_priv`.`Priv`,'$.ssl_type'),0) + 1,'','ANY','X509','SPECIFIED') AS `ssl_type`,ifnull(json_value(`global_priv`.`Priv`,'$.ssl_cipher'),'') AS `ssl_cipher`,ifnull(json_value(`global_priv`.`Priv`,'$.x509_issuer'),'') AS `x509_issuer`,ifnull(json_value(`global_priv`.`Priv`,'$.x509_subject'),'') AS `x509_subject`,cast(ifnull(json_value(`global_priv`.`Priv`,'$.max_questions'),0) as unsigned) AS `max_questions`,cast(ifnull(json_value(`global_priv`.`Priv`,'$.max_updates'),0) as unsigned) AS `max_updates`,cast(ifnull(json_value(`global_priv`.`Priv`,'$.max_connections'),0) as unsigned) AS 
`max_connections`,cast(ifnull(json_value(`global_priv`.`Priv`,'$.max_user_connections'),0) as signed) AS `max_user_connections`,ifnull(json_value(`global_priv`.`Priv`,'$.plugin'),'') AS `plugin`,ifnull(json_value(`global_priv`.`Priv`,'$.authentication_string'),'') AS `authentication_string`,if(ifnull(json_value(`global_priv`.`Priv`,'$.password_last_changed'),1) = 0,'Y','N') AS `password_expired`,elt(ifnull(json_value(`global_priv`.`Priv`,'$.is_role'),0) + 1,'N','Y') AS `is_role`,ifnull(json_value(`global_priv`.`Priv`,'$.default_role'),'') AS `default_role`,cast(ifnull(json_value(`global_priv`.`Priv`,'$.max_statement_time'),0.0) as decimal(12,6)) AS `max_statement_time` from `global_priv` latin1 latin1_swedish_ci
show create table func;
Table Create Table
func CREATE TABLE `func` (
diff --git a/mysql-test/main/system_mysql_db_fix50117.result b/mysql-test/main/system_mysql_db_fix50117.result
index 07119cda6c6..1557817fc80 100644
--- a/mysql-test/main/system_mysql_db_fix50117.result
+++ b/mysql-test/main/system_mysql_db_fix50117.result
@@ -84,7 +84,7 @@ db CREATE TABLE `db` (
) ENGINE=Aria DEFAULT CHARSET=utf8 COLLATE=utf8_bin PAGE_CHECKSUM=1 TRANSACTIONAL=1 COMMENT='Database privileges'
show create table user;
View Create View character_set_client collation_connection
-user CREATE ALGORITHM=UNDEFINED DEFINER=`mariadb.sys`@`localhost` SQL SECURITY DEFINER VIEW `user` AS select `global_priv`.`Host` AS `Host`,`global_priv`.`User` AS `User`,if(json_value(`global_priv`.`Priv`,'$.plugin') in ('mysql_native_password','mysql_old_password'),ifnull(json_value(`global_priv`.`Priv`,'$.authentication_string'),''),'') AS `Password`,if(json_value(`global_priv`.`Priv`,'$.access') & 1,'Y','N') AS `Select_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 2,'Y','N') AS `Insert_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 4,'Y','N') AS `Update_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 8,'Y','N') AS `Delete_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 16,'Y','N') AS `Create_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 32,'Y','N') AS `Drop_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 64,'Y','N') AS `Reload_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 128,'Y','N') AS `Shutdown_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 256,'Y','N') AS `Process_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 512,'Y','N') AS `File_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 1024,'Y','N') AS `Grant_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 2048,'Y','N') AS `References_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 4096,'Y','N') AS `Index_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 8192,'Y','N') AS `Alter_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 16384,'Y','N') AS `Show_db_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 32768,'Y','N') AS `Super_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 65536,'Y','N') AS `Create_tmp_table_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 131072,'Y','N') AS `Lock_tables_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 262144,'Y','N') AS `Execute_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 524288,'Y','N') AS `Repl_slave_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 1048576,'Y','N') AS `Repl_client_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 2097152,'Y','N') AS `Create_view_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 4194304,'Y','N') AS `Show_view_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 8388608,'Y','N') AS `Create_routine_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 16777216,'Y','N') AS `Alter_routine_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 33554432,'Y','N') AS `Create_user_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 67108864,'Y','N') AS `Event_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 134217728,'Y','N') AS `Trigger_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 268435456,'Y','N') AS `Create_tablespace_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 536870912,'Y','N') AS `Delete_history_priv`,elt(ifnull(json_value(`global_priv`.`Priv`,'$.ssl_type'),0) + 1,'','ANY','X509','SPECIFIED') AS `ssl_type`,ifnull(json_value(`global_priv`.`Priv`,'$.ssl_cipher'),'') AS `ssl_cipher`,ifnull(json_value(`global_priv`.`Priv`,'$.x509_issuer'),'') AS `x509_issuer`,ifnull(json_value(`global_priv`.`Priv`,'$.x509_subject'),'') AS `x509_subject`,cast(ifnull(json_value(`global_priv`.`Priv`,'$.max_questions'),0) as unsigned) AS `max_questions`,cast(ifnull(json_value(`global_priv`.`Priv`,'$.max_updates'),0) as unsigned) AS `max_updates`,cast(ifnull(json_value(`global_priv`.`Priv`,'$.max_connections'),0) as unsigned) AS 
`max_connections`,cast(ifnull(json_value(`global_priv`.`Priv`,'$.max_user_connections'),0) as signed) AS `max_user_connections`,ifnull(json_value(`global_priv`.`Priv`,'$.plugin'),'') AS `plugin`,ifnull(json_value(`global_priv`.`Priv`,'$.authentication_string'),'') AS `authentication_string`,'N' AS `password_expired`,elt(ifnull(json_value(`global_priv`.`Priv`,'$.is_role'),0) + 1,'N','Y') AS `is_role`,ifnull(json_value(`global_priv`.`Priv`,'$.default_role'),'') AS `default_role`,cast(ifnull(json_value(`global_priv`.`Priv`,'$.max_statement_time'),0.0) as decimal(12,6)) AS `max_statement_time` from `global_priv` latin1 latin1_swedish_ci
+user CREATE ALGORITHM=UNDEFINED DEFINER=`mariadb.sys`@`localhost` SQL SECURITY DEFINER VIEW `user` AS select `global_priv`.`Host` AS `Host`,`global_priv`.`User` AS `User`,if(json_value(`global_priv`.`Priv`,'$.plugin') in ('mysql_native_password','mysql_old_password'),ifnull(json_value(`global_priv`.`Priv`,'$.authentication_string'),''),'') AS `Password`,if(json_value(`global_priv`.`Priv`,'$.access') & 1,'Y','N') AS `Select_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 2,'Y','N') AS `Insert_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 4,'Y','N') AS `Update_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 8,'Y','N') AS `Delete_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 16,'Y','N') AS `Create_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 32,'Y','N') AS `Drop_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 64,'Y','N') AS `Reload_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 128,'Y','N') AS `Shutdown_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 256,'Y','N') AS `Process_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 512,'Y','N') AS `File_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 1024,'Y','N') AS `Grant_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 2048,'Y','N') AS `References_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 4096,'Y','N') AS `Index_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 8192,'Y','N') AS `Alter_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 16384,'Y','N') AS `Show_db_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 32768,'Y','N') AS `Super_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 65536,'Y','N') AS `Create_tmp_table_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 131072,'Y','N') AS `Lock_tables_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 262144,'Y','N') AS `Execute_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 524288,'Y','N') AS `Repl_slave_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 1048576,'Y','N') AS `Repl_client_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 2097152,'Y','N') AS `Create_view_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 4194304,'Y','N') AS `Show_view_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 8388608,'Y','N') AS `Create_routine_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 16777216,'Y','N') AS `Alter_routine_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 33554432,'Y','N') AS `Create_user_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 67108864,'Y','N') AS `Event_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 134217728,'Y','N') AS `Trigger_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 268435456,'Y','N') AS `Create_tablespace_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 536870912,'Y','N') AS `Delete_history_priv`,elt(ifnull(json_value(`global_priv`.`Priv`,'$.ssl_type'),0) + 1,'','ANY','X509','SPECIFIED') AS `ssl_type`,ifnull(json_value(`global_priv`.`Priv`,'$.ssl_cipher'),'') AS `ssl_cipher`,ifnull(json_value(`global_priv`.`Priv`,'$.x509_issuer'),'') AS `x509_issuer`,ifnull(json_value(`global_priv`.`Priv`,'$.x509_subject'),'') AS `x509_subject`,cast(ifnull(json_value(`global_priv`.`Priv`,'$.max_questions'),0) as unsigned) AS `max_questions`,cast(ifnull(json_value(`global_priv`.`Priv`,'$.max_updates'),0) as unsigned) AS `max_updates`,cast(ifnull(json_value(`global_priv`.`Priv`,'$.max_connections'),0) as unsigned) AS 
`max_connections`,cast(ifnull(json_value(`global_priv`.`Priv`,'$.max_user_connections'),0) as signed) AS `max_user_connections`,ifnull(json_value(`global_priv`.`Priv`,'$.plugin'),'') AS `plugin`,ifnull(json_value(`global_priv`.`Priv`,'$.authentication_string'),'') AS `authentication_string`,if(ifnull(json_value(`global_priv`.`Priv`,'$.password_last_changed'),1) = 0,'Y','N') AS `password_expired`,elt(ifnull(json_value(`global_priv`.`Priv`,'$.is_role'),0) + 1,'N','Y') AS `is_role`,ifnull(json_value(`global_priv`.`Priv`,'$.default_role'),'') AS `default_role`,cast(ifnull(json_value(`global_priv`.`Priv`,'$.max_statement_time'),0.0) as decimal(12,6)) AS `max_statement_time` from `global_priv` latin1 latin1_swedish_ci
show create table func;
Table Create Table
func CREATE TABLE `func` (
diff --git a/mysql-test/main/table_elim.result b/mysql-test/main/table_elim.result
index 41928f28963..b49e7b11ed6 100644
--- a/mysql-test/main/table_elim.result
+++ b/mysql-test/main/table_elim.result
@@ -544,7 +544,7 @@ drop table t0,t1,t2,t3,t4,t5,t6;
CREATE TABLE t1 (f1 int(11), PRIMARY KEY (f1)) ;
CREATE TABLE t2 (f4 varchar(1024), KEY (f4)) ;
Warnings:
-Warning 1071 Specified key was too long; max key length is 1000 bytes
+Note 1071 Specified key was too long; max key length is 1000 bytes
INSERT IGNORE INTO t2 VALUES ('xcddwntkbxyorzdv'),
('cnxxcddwntkbxyor'),('r'),('r'), ('did'),('I'),('when'),
('hczkfqjeggivdvac'),('e'),('okay'),('up');
diff --git a/mysql-test/main/table_value_constr.result b/mysql-test/main/table_value_constr.result
index 86e8b9cf76d..0914645efbc 100644
--- a/mysql-test/main/table_value_constr.result
+++ b/mysql-test/main/table_value_constr.result
@@ -748,7 +748,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
3 MATERIALIZED <derived2> ALL NULL NULL NULL NULL 2 100.00
2 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
-Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` semi join ((values (1)) `tvc_0`) where 1
+Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` semi join ((values (1)) `tvc_0`) where 1
explain extended select * from t1
where a in (select * from (values (1)) as tvc_0);
id select_type table type possible_keys key key_len ref rows filtered Extra
@@ -983,7 +983,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
3 MATERIALIZED <derived2> ALL NULL NULL NULL NULL 2 100.00
2 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
-Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` semi join ((values (1),(2)) `tvc_0`) where 1
+Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` semi join ((values (1),(2)) `tvc_0`) where 1
explain extended select * from t1
where a = any (select * from (values (1),(2)) as tvc_0);
id select_type table type possible_keys key key_len ref rows filtered Extra
@@ -2622,6 +2622,448 @@ ERROR HY000: 'ignore' is not allowed in this context
EXECUTE IMMEDIATE 'VALUES (?)' USING DEFAULT;
ERROR HY000: 'default' is not allowed in this context
#
+# MDEV-24675: TVC using subqueries
+#
+values((select 1));
+(select 1)
+1
+values (2), ((select 1));
+2
+2
+1
+values ((select 1)), (2), ((select 3));
+(select 1)
+1
+2
+3
+values ((select 1), 2), (3,4), (5, (select 6));
+(select 1) 2
+1 2
+3 4
+5 6
+create table t1 (a int, b int);
+insert into t1 values (1,3), (2,3), (3,2), (1,2);
+values((select max(a) from t1));
+(select max(a) from t1)
+3
+values((select min(b) from t1));
+(select min(b) from t1)
+2
+values ((select max(a) from t1), (select min(b) from t1));
+(select max(a) from t1) (select min(b) from t1)
+3 2
+values((select * from (select max(b) from t1) as t));
+(select * from (select max(b) from t1) as t)
+3
+drop table t1;
+#
+# MDEV-24618: TVC contains extra parenthesis for row expressions
+# in value list
+#
+create table t1 (a int, b int);
+insert into t1 values (1,3), (2,3);
+insert into t1 values ((5,4));
+ERROR 21000: Operand should contain 1 column(s)
+values ((1,2));
+ERROR 21000: Operand should contain 1 column(s)
+select * from (values ((1,2))) dt;
+ERROR 21000: Operand should contain 1 column(s)
+values (1,2);
+1 2
+1 2
+values ((select min(a), max(b) from t1));
+ERROR 21000: Operand should contain 1 column(s)
+drop table t1;
+#
+# MDEV-24840: union of TVCs in IN subquery
+#
+create table t1 (a int) engine=myisam;
+insert into t1 values (3), (7), (1);
+select a from t1 where a in (values (7) union values (8));
+a
+7
+explain extended select a from t1 where a in (values (7) union values (8));
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00 Using where
+4 DEPENDENT SUBQUERY <derived2> ref key0 key0 4 func 2 100.00
+2 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
+5 DEPENDENT UNION <derived3> ref key0 key0 4 func 2 100.00
+3 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
+NULL UNION RESULT <union4,5> ALL NULL NULL NULL NULL NULL NULL
+Warnings:
+Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a` from `test`.`t1` where <expr_cache><`test`.`t1`.`a`>(<in_optimizer>(`test`.`t1`.`a`,<exists>(/* select#4 */ select `tvc_0`.`7` from (values (7)) `tvc_0` where <cache>(`test`.`t1`.`a`) = `tvc_0`.`7` union /* select#5 */ select `tvc_0`.`8` from (values (8)) `tvc_0` where <cache>(`test`.`t1`.`a`) = `tvc_0`.`8`)))
+prepare stmt from "select a from t1 where a in (values (7) union values (8))";
+execute stmt;
+a
+7
+execute stmt;
+a
+7
+deallocate prepare stmt;
+select a from t1 where a not in (values (7) union values (8));
+a
+3
+1
+explain extended select a from t1 where a not in (values (7) union values (8));
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00 Using where
+4 DEPENDENT SUBQUERY <derived2> ALL NULL NULL NULL NULL 2 100.00 Using where
+2 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
+5 DEPENDENT UNION <derived3> ALL NULL NULL NULL NULL 2 100.00 Using where
+3 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
+NULL UNION RESULT <union4,5> ALL NULL NULL NULL NULL NULL NULL
+Warnings:
+Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a` from `test`.`t1` where !<expr_cache><`test`.`t1`.`a`>(<in_optimizer>(`test`.`t1`.`a`,<exists>(/* select#4 */ select `tvc_0`.`7` from (values (7)) `tvc_0` where trigcond(<cache>(`test`.`t1`.`a`) = `tvc_0`.`7`) union /* select#5 */ select `tvc_0`.`8` from (values (8)) `tvc_0` where trigcond(<cache>(`test`.`t1`.`a`) = `tvc_0`.`8`))))
+select a from t1 where a < all(values (7) union values (8));
+a
+3
+1
+explain extended select a from t1 where a < all(values (7) union values (8));
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00 Using where
+4 SUBQUERY <derived2> ALL NULL NULL NULL NULL 2 100.00
+2 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
+5 UNION <derived3> ALL NULL NULL NULL NULL 2 100.00
+3 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
+NULL UNION RESULT <union4,5> ALL NULL NULL NULL NULL NULL NULL
+Warnings:
+Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a` from `test`.`t1` where <not>(<in_optimizer>(`test`.`t1`.`a`,<min>(/* select#4 */ select `tvc_0`.`7` from (values (7)) `tvc_0` union /* select#5 */ select `tvc_0`.`8` from (values (8)) `tvc_0`) <= <cache>(`test`.`t1`.`a`)))
+select a from t1 where a >= any(values (7) union values (8));
+a
+7
+explain extended select a from t1 where a >= any(values (7) union values (8));
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00 Using where
+4 SUBQUERY <derived2> ALL NULL NULL NULL NULL 2 100.00
+2 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
+5 UNION <derived3> ALL NULL NULL NULL NULL 2 100.00
+3 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
+NULL UNION RESULT <union4,5> ALL NULL NULL NULL NULL NULL NULL
+Warnings:
+Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a` from `test`.`t1` where <nop>(<in_optimizer>(`test`.`t1`.`a`,<min>(/* select#4 */ select `tvc_0`.`7` from (values (7)) `tvc_0` union /* select#5 */ select `tvc_0`.`8` from (values (8)) `tvc_0`) <= <cache>(`test`.`t1`.`a`)))
+drop table t1;
+#
+# MDEV-24934:EXPLAIN for queries based on TVC using subqueries
+#
+create table t1 (a int);
+insert into t1 values (3), (7), (1);
+values (8), ((select * from t1 where a between 2 and 4));
+8
+8
+3
+explain values (8), ((select * from t1 where a between 2 and 4));
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY NULL NULL NULL NULL NULL NULL NULL No tables used
+2 SUBQUERY t1 ALL NULL NULL NULL NULL 3 Using where
+values ((select * from t1 where a between 2 and 4)),
+((select * from t1 where a > 10));
+(select * from t1 where a between 2 and 4)
+3
+NULL
+explain values ((select * from t1 where a between 2 and 4)),
+((select * from t1 where a > 10));
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY NULL NULL NULL NULL NULL NULL NULL No tables used
+3 SUBQUERY t1 ALL NULL NULL NULL NULL 3 Using where
+2 SUBQUERY t1 ALL NULL NULL NULL NULL 3 Using where
+values (10,11), ((select * from t1 where a = 7) + 1, 21);
+10 11
+10 11
+8 21
+explain values (10,11), ((select * from t1 where a = 7) + 1, 21);
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY NULL NULL NULL NULL NULL NULL NULL No tables used
+2 SUBQUERY t1 ALL NULL NULL NULL NULL 3 Using where
+drop table t1;
+#
+# MDEV-24910: TVC containing subquery used as a subselect
+#
+create table t1 (a int) engine=myisam;
+insert into t1 values (3), (7), (1);
+create table t2 (b int) engine=myisam;
+insert into t2 values (1), (2);
+select (values ((select 2))) from t2;
+(values ((select 2)))
+2
+2
+explain select (values ((select 2))) from t2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t2 ALL NULL NULL NULL NULL 2
+4 SUBQUERY <derived2> ALL NULL NULL NULL NULL 2
+2 DERIVED NULL NULL NULL NULL NULL NULL NULL No tables used
+Warnings:
+Note 1249 Select 3 was reduced during optimization
+prepare stmt from "select (values ((select 2))) from t2";
+execute stmt;
+(values ((select 2)))
+2
+2
+execute stmt;
+(values ((select 2)))
+2
+2
+deallocate prepare stmt;
+select (values ((select * from t1 where a > 10))) from t2;
+(values ((select * from t1 where a > 10)))
+NULL
+NULL
+explain select (values ((select * from t1 where a > 10))) from t2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t2 ALL NULL NULL NULL NULL 2
+4 SUBQUERY <derived2> ALL NULL NULL NULL NULL 2
+2 DERIVED NULL NULL NULL NULL NULL NULL NULL No tables used
+3 SUBQUERY t1 ALL NULL NULL NULL NULL 3 Using where
+prepare stmt from "select (values ((select * from t1 where a > 10))) from t2";
+execute stmt;
+(values ((select * from t1 where a > 10)))
+NULL
+NULL
+execute stmt;
+(values ((select * from t1 where a > 10)))
+NULL
+NULL
+deallocate prepare stmt;
+create table t3 (a int);
+insert into t3 values
+(3), (7), (7), (1), (3), (9), (7), (9), (8), (7), (8);
+create view v1 as select count(a) as c from t3 group by a;
+select
+(values ((select * from t3 where a in (select * from v1))));
+(values ((select * from t3 where a in (select * from v1))))
+1
+explain select
+(values ((select * from t3 where a in (select * from v1))));
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY NULL NULL NULL NULL NULL NULL NULL No tables used
+6 SUBQUERY <derived2> ALL NULL NULL NULL NULL 2
+2 DERIVED NULL NULL NULL NULL NULL NULL NULL No tables used
+3 SUBQUERY t3 ALL NULL NULL NULL NULL 11
+3 SUBQUERY <subquery4> eq_ref distinct_key distinct_key 8 func 1 Using where
+4 MATERIALIZED <derived5> ALL NULL NULL NULL NULL 11
+5 DERIVED t3 ALL NULL NULL NULL NULL 11 Using temporary; Using filesort
+prepare stmt from "select
+(values ((select * from t3 where a in (select * from v1))))";
+execute stmt;
+(values ((select * from t3 where a in (select * from v1))))
+1
+execute stmt;
+(values ((select * from t3 where a in (select * from v1))))
+1
+deallocate prepare stmt;
+select
+(values ((select * from t3
+where a > 10 and a in (select * from v1))));
+(values ((select * from t3
+where a > 10 and a in (select * from v1))))
+NULL
+explain select
+(values ((select * from t3
+where a > 10 and a in (select * from v1))));
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY NULL NULL NULL NULL NULL NULL NULL No tables used
+6 SUBQUERY <derived2> ALL NULL NULL NULL NULL 2
+2 DERIVED NULL NULL NULL NULL NULL NULL NULL No tables used
+3 SUBQUERY t3 ALL NULL NULL NULL NULL 11 Using where
+3 SUBQUERY <subquery4> eq_ref distinct_key distinct_key 8 func 1 Using where
+4 MATERIALIZED <derived5> ALL NULL NULL NULL NULL 11
+5 DERIVED t3 ALL NULL NULL NULL NULL 11 Using temporary; Using filesort
+prepare stmt from "select
+(values ((select * from t3
+where a > 10 and a in (select * from v1))))";
+execute stmt;
+(values ((select * from t3
+where a > 10 and a in (select * from v1))))
+NULL
+execute stmt;
+(values ((select * from t3
+where a > 10 and a in (select * from v1))))
+NULL
+deallocate prepare stmt;
+drop view v1;
+drop table t1,t2,t3;
+#
+# MDEV-24919: subselect formed by TVC and used in set function
+#
+select sum((values(1)));
+sum((values(1)))
+1
+#
+# MDEV-22786: Nested table values constructors
+#
+values ((values (2)));
+(values (2))
+2
+values ((values (2)), (5), (select 4));
+(values (2)) 5 (select 4)
+2 5 4
+values ((7), (values (2)), (5), (select 4));
+7 (values (2)) 5 (select 4)
+7 2 5 4
+values ((values (2))) union values ((values (3)));
+(values (2))
+2
+3
+values ((values (2))), ((values (3)));
+(values (2))
+2
+3
+values ((values (2))), ((select 4)), ((values (3)));
+(values (2))
+2
+4
+3
+values ((values (4)), (values (5))), ((values (1)), (values (7)));
+(values (4)) (values (5))
+4 5
+1 7
+values ((values (4)), (select 5)), ((select 1), (values (7)));
+(values (4)) (select 5)
+4 5
+1 7
+values ((select 2)) union values ((values (3)));
+(select 2)
+2
+3
+values ((values (2))) union values((select 3));
+(values (2))
+2
+3
+values ((values (2))) union all values ((values (2)));
+(values (2))
+2
+2
+values ((values (4)), (values (5))), ((values (1)), (values (7)))
+union
+values ((values (4)), (select 5)), ((select 2), (values (8)));
+(values (4)) (values (5))
+4 5
+1 7
+2 8
+values ((values (4)), (values (5))), ((values (1)), (values (7)))
+union all
+values ((values (4)), (select 5)), ((select 2), (values (8)));
+(values (4)) (values (5))
+4 5
+1 7
+4 5
+2 8
+values ((values (1) union values (1)));
+(values (1) union values (1))
+1
+values ((values (1) union values (1) union values (1)));
+(values (1) union values (1) union values (1))
+1
+values ((values ((values (4)))));
+(values ((values (4))))
+4
+values ((values ((select 5))));
+(values ((select 5)))
+5
+values ((select (values (4))), (values ((values(5)))));
+(select (values (4))) (values ((values(5))))
+4 5
+values ((select (values (4))), (values ((select 5))));
+(select (values (4))) (values ((select 5)))
+4 5
+values ((select (values (4))), (values ((values(5)))))
+union
+values ((select (values (4))), (values ((select 7))));
+(select (values (4))) (values ((values(5))))
+4 5
+4 7
+values ((values (2))), ((values ((values (4)))));
+(values (2))
+2
+4
+values ((values (2))), ((values ((select 4))));
+(values (2))
+2
+4
+values ((values (2))), ((values ((values (4)))))
+union
+values ((values (8))), ((values ((select 4))));
+(values (2))
+2
+4
+8
+values ((values (2))), ((values ((values (4)))))
+union all
+values ((values (8))), ((values ((select 4))));
+(values (2))
+2
+4
+8
+4
+select * from (values ((values (2)))) dt;
+(values (2))
+2
+select * from (values ((values (2)), (5), (select 4))) dt;
+(values (2)) 5 (select 4)
+2 5 4
+select * from (values ((values (2))) union values ((values (3)))) dt;
+(values (2))
+2
+3
+select * from (values ((values (2))), ((values (3)))) dt;
+(values (2))
+2
+3
+select * from (values ((values (2))), ((values (3)))) dt;
+(values (2))
+2
+3
+select * from (values ((values (2))), ((select 4)), ((values (3)))) dt;
+(values (2))
+2
+4
+3
+create table t1 (a int);
+insert into t1 values (3), (7), (1);
+values ((values ((select a from t1 where a=7))));
+(values ((select a from t1 where a=7)))
+7
+values ((values ((select (values(2)) from t1 where a=8))));
+(values ((select (values(2)) from t1 where a=8)))
+NULL
+values ((values ((select a from t1 where a=7))))
+union
+values ((values ((select (values(2)) from t1 where a=8))));
+(values ((select a from t1 where a=7)))
+7
+NULL
+values ((values ((select a from t1 where a in ((values (7)))))));
+(values ((select a from t1 where a in ((values (7))))))
+7
+values ((values ((select a from t1 where a in ((values (7), (8)))))));
+(values ((select a from t1 where a in ((values (7), (8))))))
+7
+values ((values
+((select a from t1 where a in (values (7) union values (8))))));
+(values
+((select a from t1 where a in (values (7) union values (8)))))
+7
+values ((values ((select (values(2)) from t1 where a=8))));
+(values ((select (values(2)) from t1 where a=8)))
+NULL
+values ((select (values(2)) from t1 where a<7));
+ERROR 21000: Subquery returns more than 1 row
+select * from (values ((values ((select a from t1 where a=7))))) dt;
+(values ((select a from t1 where a=7)))
+7
+select * from (values ((values ((select (values(2)) from t1 where a=8))))) dt;
+(values ((select (values(2)) from t1 where a=8)))
+NULL
+insert into t1(a) values ((values (2))), ((values (3)));
+select * from t1;
+a
+3
+7
+1
+2
+3
+drop table t1;
+End of 10.3 tests
+#
# MDEV-22610 Crash in INSERT INTO t1 (VALUES (DEFAULT) UNION VALUES (DEFAULT))
#
VALUES (DEFAULT) UNION VALUES (DEFAULT);
@@ -2634,3 +3076,6 @@ ERROR HY000: 'default' is not allowed in this context
INSERT INTO t1 (VALUES (IGNORE) UNION VALUES (IGNORE));
ERROR HY000: 'ignore' is not allowed in this context
DROP TABLE t1;
+#
+# End of 10.4 tests
+#
diff --git a/mysql-test/main/table_value_constr.test b/mysql-test/main/table_value_constr.test
index bd5e4d75904..49e1c7c18c6 100644
--- a/mysql-test/main/table_value_constr.test
+++ b/mysql-test/main/table_value_constr.test
@@ -1354,6 +1354,281 @@ EXECUTE IMMEDIATE 'VALUES (?)' USING IGNORE;
--error ER_UNKNOWN_ERROR
EXECUTE IMMEDIATE 'VALUES (?)' USING DEFAULT;
+--echo #
+--echo # MDEV-24675: TVC using subqueries
+--echo #
+
+values((select 1));
+
+values (2), ((select 1));
+
+values ((select 1)), (2), ((select 3));
+
+values ((select 1), 2), (3,4), (5, (select 6));
+
+create table t1 (a int, b int);
+insert into t1 values (1,3), (2,3), (3,2), (1,2);
+
+values((select max(a) from t1));
+
+values((select min(b) from t1));
+
+values ((select max(a) from t1), (select min(b) from t1));
+
+values((select * from (select max(b) from t1) as t));
+
+drop table t1;
+
+--echo #
+--echo # MDEV-24618: TVC contains extra parenthesis for row expressions
+--echo # in value list
+--echo #
+
+create table t1 (a int, b int);
+insert into t1 values (1,3), (2,3);
+--error ER_OPERAND_COLUMNS
+insert into t1 values ((5,4));
+
+--error ER_OPERAND_COLUMNS
+values ((1,2));
+
+--error ER_OPERAND_COLUMNS
+select * from (values ((1,2))) dt;
+
+values (1,2);
+--error ER_OPERAND_COLUMNS
+values ((select min(a), max(b) from t1));
+
+drop table t1;
+
+--echo #
+--echo # MDEV-24840: union of TVCs in IN subquery
+--echo #
+
+create table t1 (a int) engine=myisam;
+insert into t1 values (3), (7), (1);
+
+let $q=
+select a from t1 where a in (values (7) union values (8));
+eval $q;
+eval explain extended $q;
+eval prepare stmt from "$q";
+execute stmt;
+execute stmt;
+deallocate prepare stmt;
+
+let $q=
+select a from t1 where a not in (values (7) union values (8));
+eval $q;
+eval explain extended $q;
+
+let $q=
+select a from t1 where a < all(values (7) union values (8));
+eval $q;
+eval explain extended $q;
+
+let $q=
+select a from t1 where a >= any(values (7) union values (8));
+eval $q;
+eval explain extended $q;
+
+drop table t1;
+
+--echo #
+--echo # MDEV-24934:EXPLAIN for queries based on TVC using subqueries
+--echo #
+
+create table t1 (a int);
+insert into t1 values (3), (7), (1);
+
+let $q1=
+values (8), ((select * from t1 where a between 2 and 4));
+eval $q1;
+eval explain $q1;
+
+let $q2=
+values ((select * from t1 where a between 2 and 4)),
+ ((select * from t1 where a > 10));
+eval $q2;
+eval explain $q2;
+
+let $q3=
+values (10,11), ((select * from t1 where a = 7) + 1, 21);
+eval $q3;
+eval explain $q3;
+
+drop table t1;
+
+--echo #
+--echo # MDEV-24910: TVC containing subquery used as a subselect
+--echo #
+
+create table t1 (a int) engine=myisam;
+insert into t1 values (3), (7), (1);
+create table t2 (b int) engine=myisam;
+insert into t2 values (1), (2);
+
+let $q1=
+select (values ((select 2))) from t2;
+eval $q1;
+eval explain $q1;
+eval prepare stmt from "$q1";
+execute stmt;
+execute stmt;
+deallocate prepare stmt;
+
+let $q2=
+select (values ((select * from t1 where a > 10))) from t2;
+eval $q2;
+eval explain $q2;
+eval prepare stmt from "$q2";
+execute stmt;
+execute stmt;
+deallocate prepare stmt;
+
+create table t3 (a int);
+insert into t3 values
+ (3), (7), (7), (1), (3), (9), (7), (9), (8), (7), (8);
+
+create view v1 as select count(a) as c from t3 group by a;
+
+let $q3=
+select
+(values ((select * from t3 where a in (select * from v1))));
+eval $q3;
+eval explain $q3;
+eval prepare stmt from "$q3";
+execute stmt;
+execute stmt;
+deallocate prepare stmt;
+
+let $q4=
+select
+(values ((select * from t3
+ where a > 10 and a in (select * from v1))));
+eval $q4;
+eval explain $q4;
+eval prepare stmt from "$q4";
+execute stmt;
+execute stmt;
+deallocate prepare stmt;
+
+drop view v1;
+drop table t1,t2,t3;
+
+--echo #
+--echo # MDEV-24919: subselect formed by TVC and used in set function
+--echo #
+
+select sum((values(1)));
+
+--echo #
+--echo # MDEV-22786: Nested table values constructors
+--echo #
+
+values ((values (2)));
+
+values ((values (2)), (5), (select 4));
+
+values ((7), (values (2)), (5), (select 4));
+
+values ((values (2))) union values ((values (3)));
+
+values ((values (2))), ((values (3)));
+
+values ((values (2))), ((select 4)), ((values (3)));
+
+values ((values (4)), (values (5))), ((values (1)), (values (7)));
+
+values ((values (4)), (select 5)), ((select 1), (values (7)));
+
+values ((select 2)) union values ((values (3)));
+
+values ((values (2))) union values((select 3));
+
+values ((values (2))) union all values ((values (2)));
+
+values ((values (4)), (values (5))), ((values (1)), (values (7)))
+union
+values ((values (4)), (select 5)), ((select 2), (values (8)));
+
+values ((values (4)), (values (5))), ((values (1)), (values (7)))
+union all
+values ((values (4)), (select 5)), ((select 2), (values (8)));
+
+values ((values (1) union values (1)));
+
+values ((values (1) union values (1) union values (1)));
+
+values ((values ((values (4)))));
+
+values ((values ((select 5))));
+
+values ((select (values (4))), (values ((values(5)))));
+
+values ((select (values (4))), (values ((select 5))));
+
+values ((select (values (4))), (values ((values(5)))))
+union
+values ((select (values (4))), (values ((select 7))));
+
+values ((values (2))), ((values ((values (4)))));
+
+values ((values (2))), ((values ((select 4))));
+
+values ((values (2))), ((values ((values (4)))))
+union
+values ((values (8))), ((values ((select 4))));
+
+values ((values (2))), ((values ((values (4)))))
+union all
+values ((values (8))), ((values ((select 4))));
+
+select * from (values ((values (2)))) dt;
+
+select * from (values ((values (2)), (5), (select 4))) dt;
+
+select * from (values ((values (2))) union values ((values (3)))) dt;
+
+select * from (values ((values (2))), ((values (3)))) dt;
+
+select * from (values ((values (2))), ((values (3)))) dt;
+
+select * from (values ((values (2))), ((select 4)), ((values (3)))) dt;
+
+create table t1 (a int);
+insert into t1 values (3), (7), (1);
+
+values ((values ((select a from t1 where a=7))));
+
+values ((values ((select (values(2)) from t1 where a=8))));
+
+values ((values ((select a from t1 where a=7))))
+union
+values ((values ((select (values(2)) from t1 where a=8))));
+
+values ((values ((select a from t1 where a in ((values (7)))))));
+
+values ((values ((select a from t1 where a in ((values (7), (8)))))));
+
+values ((values
+ ((select a from t1 where a in (values (7) union values (8))))));
+
+values ((values ((select (values(2)) from t1 where a=8))));
+
+--error ER_SUBQUERY_NO_1_ROW
+values ((select (values(2)) from t1 where a<7));
+
+select * from (values ((values ((select a from t1 where a=7))))) dt;
+
+select * from (values ((values ((select (values(2)) from t1 where a=8))))) dt;
+
+insert into t1(a) values ((values (2))), ((values (3)));
+select * from t1;
+
+drop table t1;
+
+--echo End of 10.3 tests
--echo #
--echo # MDEV-22610 Crash in INSERT INTO t1 (VALUES (DEFAULT) UNION VALUES (DEFAULT))
@@ -1369,3 +1644,7 @@ INSERT INTO t1 (VALUES (DEFAULT) UNION VALUES (DEFAULT));
--error ER_UNKNOWN_ERROR
INSERT INTO t1 (VALUES (IGNORE) UNION VALUES (IGNORE));
DROP TABLE t1;
+
+--echo #
+--echo # End of 10.4 tests
+--echo #
diff --git a/mysql-test/main/type_decimal.result b/mysql-test/main/type_decimal.result
index 6e7d7c8b6fe..0228652f74c 100644
--- a/mysql-test/main/type_decimal.result
+++ b/mysql-test/main/type_decimal.result
@@ -176,9 +176,8 @@ Note 1265 Data truncated for column 'a' at row 2
insert ignore into t1 values ("1e+18446744073709551615"),("1e+18446744073709551616"),("1e-9223372036854775807"),("1e-9223372036854775809");
Warnings:
Warning 1264 Out of range value for column 'a' at row 1
-Warning 1366 Incorrect decimal value: '1e+18446744073709551616' for column `test`.`t1`.`a` at row 2
+Warning 1264 Out of range value for column 'a' at row 2
Note 1265 Data truncated for column 'a' at row 3
-Warning 1366 Incorrect decimal value: '1e-9223372036854775809' for column `test`.`t1`.`a` at row 4
insert ignore into t1 values ("123.4e"),("123.4e+2"),("123.4e-2"),("123e1"),("123e+0");
Warnings:
Warning 1265 Data truncated for column 'a' at row 1
@@ -209,7 +208,7 @@ a
99999999.99
0.00
99999999.99
-0.00
+99999999.99
0.00
0.00
123.40
@@ -1078,6 +1077,90 @@ t1 CREATE TABLE `t1` (
DROP TABLE t1;
DROP TABLE t1dec102;
#
+# MDEV-24790 CAST('0e1111111111' AS DECIMAL(38,0)) returns a wrong result
+#
+SELECT CAST('0e111111111' AS DECIMAL(38,0)) AS a;
+a
+0
+SELECT CAST('0e1111111111' AS DECIMAL(38,0)) AS a;
+a
+0
+SELECT CAST('.00000000000000000000000000000000000001e111111111111111111111' AS DECIMAL(38,0)) AS a;
+a
+99999999999999999999999999999999999999
+Warnings:
+Warning 1916 Got overflow when converting '' to DECIMAL. Value truncated
+Warning 1292 Truncated incorrect DECIMAL value: '.00000000000000000000000000000000000001e111111111111111111111'
+Warning 1264 Out of range value for column 'a' at row 1
+CREATE TABLE t1 (str VARCHAR(128), comment VARCHAR(128));
+INSERT INTO t1 VALUES
+('0e111111111111111111111', 'Zero mantissa and a huge positive exponent'),
+('1e111111111111111111111', 'Non-zero mantissa, huge positive exponent'),
+('0e-111111111111111111111', 'Zero mantissa and a huge negative exponent'),
+('1e-111111111111111111111', 'Non-zero mantissa and a huge negative exponent');
+BEGIN NOT ATOMIC
+DECLARE done INT DEFAULT FALSE;
+DECLARE vstr, vcomment VARCHAR(128);
+DECLARE cur1 CURSOR FOR SELECT str, comment FROM t1 ORDER BY str;
+DECLARE CONTINUE HANDLER FOR NOT FOUND SET done = TRUE;
+OPEN cur1;
+read_loop:
+LOOP
+FETCH cur1 INTO vstr, vcomment;
+IF done THEN
+LEAVE read_loop;
+END IF;
+SELECT vstr AS `--------`, vcomment AS `--------`;
+SELECT CAST(str AS DECIMAL(38,0)) FROM t1 WHERE str=vstr;
+SHOW WARNINGS;
+SELECT CAST(CONCAT(str,'garbage') AS DECIMAL(38,0)) FROM t1 WHERE str=vstr;
+SHOW WARNINGS;
+END LOOP;
+END;
+$$
+-------- --------
+0e-111111111111111111111 Zero mantissa and a huge negative exponent
+CAST(str AS DECIMAL(38,0))
+0
+Level Code Message
+CAST(CONCAT(str,'garbage') AS DECIMAL(38,0))
+0
+Level Code Message
+Warning 1292 Truncated incorrect DECIMAL value: '0e-111111111111111111111garbage'
+-------- --------
+0e111111111111111111111 Zero mantissa and a huge positive exponent
+CAST(str AS DECIMAL(38,0))
+0
+Level Code Message
+CAST(CONCAT(str,'garbage') AS DECIMAL(38,0))
+0
+Level Code Message
+Warning 1292 Truncated incorrect DECIMAL value: '0e111111111111111111111garbage'
+-------- --------
+1e-111111111111111111111 Non-zero mantissa and a huge negative exponent
+CAST(str AS DECIMAL(38,0))
+0
+Level Code Message
+CAST(CONCAT(str,'garbage') AS DECIMAL(38,0))
+0
+Level Code Message
+Warning 1292 Truncated incorrect DECIMAL value: '1e-111111111111111111111garbage'
+-------- --------
+1e111111111111111111111 Non-zero mantissa, huge positive exponent
+CAST(str AS DECIMAL(38,0))
+99999999999999999999999999999999999999
+Level Code Message
+Warning 1916 Got overflow when converting '' to DECIMAL. Value truncated
+Warning 1292 Truncated incorrect DECIMAL value: '1e111111111111111111111'
+Warning 1264 Out of range value for column 'CAST(str AS DECIMAL(38,0))' at row 1
+CAST(CONCAT(str,'garbage') AS DECIMAL(38,0))
+99999999999999999999999999999999999999
+Level Code Message
+Warning 1916 Got overflow when converting '' to DECIMAL. Value truncated
+Warning 1292 Truncated incorrect DECIMAL value: '1e111111111111111111111garbage'
+Warning 1264 Out of range value for column 'CAST(CONCAT(str,'garbage') AS DECIMAL(38,0))' at row 1
+DROP TABLE t1;
+#
# End of 10.2 tests
#
#
diff --git a/mysql-test/main/type_decimal.test b/mysql-test/main/type_decimal.test
index 4ffbcbd3288..1076592fe62 100644
--- a/mysql-test/main/type_decimal.test
+++ b/mysql-test/main/type_decimal.test
@@ -670,6 +670,50 @@ DROP TABLE t1;
DROP TABLE t1dec102;
--echo #
+--echo # MDEV-24790 CAST('0e1111111111' AS DECIMAL(38,0)) returns a wrong result
+--echo #
+
+SELECT CAST('0e111111111' AS DECIMAL(38,0)) AS a;
+SELECT CAST('0e1111111111' AS DECIMAL(38,0)) AS a;
+SELECT CAST('.00000000000000000000000000000000000001e111111111111111111111' AS DECIMAL(38,0)) AS a;
+
+CREATE TABLE t1 (str VARCHAR(128), comment VARCHAR(128));
+INSERT INTO t1 VALUES
+('0e111111111111111111111', 'Zero mantissa and a huge positive exponent'),
+('1e111111111111111111111', 'Non-zero mantissa, huge positive exponent'),
+('0e-111111111111111111111', 'Zero mantissa and a huge negative exponent'),
+('1e-111111111111111111111', 'Non-zero mantissa and a huge negative exponent');
+
+# The loop below issues SHOW WARNINGS manually, so disable automatic warnings
+--disable_warnings
+DELIMITER $$;
+BEGIN NOT ATOMIC
+ DECLARE done INT DEFAULT FALSE;
+ DECLARE vstr, vcomment VARCHAR(128);
+ DECLARE cur1 CURSOR FOR SELECT str, comment FROM t1 ORDER BY str;
+ DECLARE CONTINUE HANDLER FOR NOT FOUND SET done = TRUE;
+ OPEN cur1;
+read_loop:
+ LOOP
+ FETCH cur1 INTO vstr, vcomment;
+ IF done THEN
+ LEAVE read_loop;
+ END IF;
+ SELECT vstr AS `--------`, vcomment AS `--------`;
+ SELECT CAST(str AS DECIMAL(38,0)) FROM t1 WHERE str=vstr;
+ SHOW WARNINGS;
+ SELECT CAST(CONCAT(str,'garbage') AS DECIMAL(38,0)) FROM t1 WHERE str=vstr;
+ SHOW WARNINGS;
+ END LOOP;
+END;
+$$
+DELIMITER ;$$
+--enable_warnings
+
+
+DROP TABLE t1;
+
+--echo #
--echo # End of 10.2 tests
--echo #
diff --git a/mysql-test/main/type_year.result b/mysql-test/main/type_year.result
index 583a2818e02..26f90757d09 100644
--- a/mysql-test/main/type_year.result
+++ b/mysql-test/main/type_year.result
@@ -1,4 +1,3 @@
-drop table if exists t1;
create table t1 (y year,y2 year(2));
Warnings:
Note 1287 'YEAR(2)' is deprecated and will be removed in a future release. Please use YEAR(4) instead
@@ -28,6 +27,9 @@ y y2
2001 01
2069 69
drop table t1;
+#
+# Bug 2335
+#
create table t1 (y year);
insert ignore into t1 values (now());
Warnings:
@@ -36,6 +38,9 @@ select if(y = now(), 1, 0) from t1;
if(y = now(), 1, 0)
1
drop table t1;
+#
+# Bug #27176: Assigning a string to an year column has unexpected results
+#
create table t1(a year);
insert into t1 values (2000.5), ('2000.5'), ('2001a'), ('2.001E3');
Warnings:
@@ -47,7 +52,9 @@ a
2001
2001
drop table t1;
-End of 5.0 tests
+#
+# End of 5.0 tests
+#
#
# Bug #49480: WHERE using YEAR columns returns unexpected results
#
@@ -373,7 +380,8 @@ Note 1287 'YEAR(2)' is deprecated and will be removed in a future release. Pleas
Note 1287 'YEAR(2)' is deprecated and will be removed in a future release. Please use YEAR(4) instead
DROP TABLE t1;
#
-End of 5.1 tests
+# End of 5.1 tests
+#
create function y2k() returns int deterministic return 2000;
create table t1 (a year(2), b int);
Warnings:
@@ -456,9 +464,6 @@ DROP TABLE t1;
# End of 10.1 tests
#
#
-# Start of 10.2 tests
-#
-#
# MDEV-9392 Copying from DECIMAL to YEAR is not consistent about warnings
#
CREATE TABLE t1 (a YEAR);
@@ -500,6 +505,64 @@ Warnings:
Warning 1264 Out of range value for column 'a' at row 1
DROP TABLE t1;
#
+# Various widths of the YEAR
+#
+create or replace table t1 (a YEAR(0));
+Warnings:
+Note 1287 'YEAR(0)' is deprecated and will be removed in a future release. Please use YEAR(4) instead
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` year(4) DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+create or replace table t1 (a YEAR(1));
+Warnings:
+Note 1287 'YEAR(1)' is deprecated and will be removed in a future release. Please use YEAR(4) instead
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` year(4) DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+create or replace table t1 (a YEAR(2));
+Warnings:
+Note 1287 'YEAR(2)' is deprecated and will be removed in a future release. Please use YEAR(4) instead
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` year(2) DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+create or replace table t1 (a YEAR(3));
+Warnings:
+Note 1287 'YEAR(3)' is deprecated and will be removed in a future release. Please use YEAR(4) instead
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` year(4) DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+create or replace table t1 (a YEAR(4));
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` year(4) DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+create or replace table t1 (a YEAR(5));
+Warnings:
+Note 1287 'YEAR(5)' is deprecated and will be removed in a future release. Please use YEAR(4) instead
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` year(4) DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+create or replace table t1 (a YEAR(100));
+Warnings:
+Note 1287 'YEAR(100)' is deprecated and will be removed in a future release. Please use YEAR(4) instead
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` year(4) DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+drop table t1;
+#
# End of 10.2 tests
#
#
diff --git a/mysql-test/main/type_year.test b/mysql-test/main/type_year.test
index 19692148146..c1231cc3e79 100644
--- a/mysql-test/main/type_year.test
+++ b/mysql-test/main/type_year.test
@@ -1,9 +1,6 @@
#
# Test year
#
---disable_warnings
-drop table if exists t1;
---enable_warnings
create table t1 (y year,y2 year(2));
insert into t1 values (0,0),(1999,1999),(2000,2000),(2001,2001),(70,70),(69,69);
@@ -12,24 +9,27 @@ select * from t1 order by y;
select * from t1 order by y2;
drop table t1;
-#
-# Bug 2335
-#
+--echo #
+--echo # Bug 2335
+--echo #
create table t1 (y year);
insert ignore into t1 values (now());
select if(y = now(), 1, 0) from t1;
drop table t1;
-#
-# Bug #27176: Assigning a string to an year column has unexpected results
-#
+--echo #
+--echo # Bug #27176: Assigning a string to an year column has unexpected results
+--echo #
+
create table t1(a year);
insert into t1 values (2000.5), ('2000.5'), ('2001a'), ('2.001E3');
select * from t1;
drop table t1;
---echo End of 5.0 tests
+--echo #
+--echo # End of 5.0 tests
+--echo #
--echo #
--echo # Bug #49480: WHERE using YEAR columns returns unexpected results
@@ -169,8 +169,9 @@ ALTER TABLE t1 MODIFY COLUMN c2 YEAR(2);
DROP TABLE t1;
--echo #
+--echo # End of 5.1 tests
+--echo #
---echo End of 5.1 tests
#
# fun with convert_const_to_int
# in some cases 00 is equal to 2000, in others it is not.
@@ -230,10 +231,6 @@ DROP TABLE t1;
--echo #
--echo #
---echo # Start of 10.2 tests
---echo #
-
---echo #
--echo # MDEV-9392 Copying from DECIMAL to YEAR is not consistent about warnings
--echo #
CREATE TABLE t1 (a YEAR);
@@ -268,6 +265,17 @@ SET STATEMENT sql_mode = 'NO_ENGINE_SUBSTITUTION' FOR
ALTER TABLE t1 MODIFY a YEAR;
DROP TABLE t1;
+--echo #
+--echo # Various widths of the YEAR
+--echo #
+create or replace table t1 (a YEAR(0)); SHOW CREATE TABLE t1;
+create or replace table t1 (a YEAR(1)); SHOW CREATE TABLE t1;
+create or replace table t1 (a YEAR(2)); SHOW CREATE TABLE t1;
+create or replace table t1 (a YEAR(3)); SHOW CREATE TABLE t1;
+create or replace table t1 (a YEAR(4)); SHOW CREATE TABLE t1;
+create or replace table t1 (a YEAR(5)); SHOW CREATE TABLE t1;
+create or replace table t1 (a YEAR(100)); SHOW CREATE TABLE t1;
+drop table t1;
--echo #
--echo # End of 10.2 tests
diff --git a/mysql-test/main/union.result b/mysql-test/main/union.result
index ef3aed397ba..52bc3ccb0dc 100644
--- a/mysql-test/main/union.result
+++ b/mysql-test/main/union.result
@@ -2616,5 +2616,39 @@ Warnings:
Note 1003 /* select#1 */ select `test`.`t2`.`a` AS `a` from `test`.`t2` where `test`.`t2`.`a` < 5 except /* select#2 */ select `test`.`t3`.`a` AS `a` from `test`.`t3` where `test`.`t3`.`a` < 5 union all /* select#3 */ select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` > 4
drop table t1,t2,t3;
#
+# MDEV-24387: Wrong number of decimal digits in certain UNION/Subquery
+# constellation
+#
+SELECT CAST(1 AS UNSIGNED) UNION ALL SELECT * from (SELECT NULL) t;
+Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr
+def CAST(1 AS UNSIGNED) 246 2 1 Y 32896 0 63
+CAST(1 AS UNSIGNED)
+1
+NULL
+SELECT CAST(1 AS SIGNED) UNION ALL SELECT * from (SELECT NULL) t;
+Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr
+def CAST(1 AS SIGNED) 3 2 1 Y 32896 0 63
+CAST(1 AS SIGNED)
+1
+NULL
+SELECT CAST(1 AS SIGNED) UNION ALL SELECT * from (SELECT CAST(1 AS UNSIGNED)) t;
+Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr
+def CAST(1 AS SIGNED) 246 11 1 N 32897 0 63
+CAST(1 AS SIGNED)
+1
+1
+SELECT CAST(1 AS UNSIGNED) UNION ALL SELECT NULL;
+Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr
+def CAST(1 AS UNSIGNED) 246 2 1 Y 32896 0 63
+CAST(1 AS UNSIGNED)
+1
+NULL
+SELECT CAST(1 AS UNSIGNED) UNION ALL SELECT CAST(1 AS SIGNED);
+Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr
+def CAST(1 AS UNSIGNED) 246 2 1 N 32897 0 63
+CAST(1 AS UNSIGNED)
+1
+1
+#
# End of 10.3 tests
#
diff --git a/mysql-test/main/union.test b/mysql-test/main/union.test
index 33adbb4603d..2e5a04a27f4 100644
--- a/mysql-test/main/union.test
+++ b/mysql-test/main/union.test
@@ -1866,5 +1866,23 @@ select * from t1 where a > 4;
drop table t1,t2,t3;
--echo #
+--echo # MDEV-24387: Wrong number of decimal digits in certain UNION/Subquery
+--echo # constellation
+--echo #
+
+--disable_ps_protocol
+--enable_metadata
+
+SELECT CAST(1 AS UNSIGNED) UNION ALL SELECT * from (SELECT NULL) t;
+SELECT CAST(1 AS SIGNED) UNION ALL SELECT * from (SELECT NULL) t;
+SELECT CAST(1 AS SIGNED) UNION ALL SELECT * from (SELECT CAST(1 AS UNSIGNED)) t;
+
+SELECT CAST(1 AS UNSIGNED) UNION ALL SELECT NULL;
+SELECT CAST(1 AS UNSIGNED) UNION ALL SELECT CAST(1 AS SIGNED);
+
+--disable_metadata
+--enable_ps_protocol
+
+--echo #
--echo # End of 10.3 tests
--echo #
diff --git a/mysql-test/main/upgrade_MDEV-23102-1.test b/mysql-test/main/upgrade_MDEV-23102-1.test
index 8491ddfffe4..172e0d595b2 100644
--- a/mysql-test/main/upgrade_MDEV-23102-1.test
+++ b/mysql-test/main/upgrade_MDEV-23102-1.test
@@ -75,7 +75,6 @@ CREATE DEFINER='superuser'@'localhost' SQL SECURITY DEFINER VIEW IF NOT EXISTS u
CAST(IFNULL(JSON_VALUE(Priv, '$.max_statement_time'), 0.0) AS DECIMAL(12,6)) AS max_statement_time
FROM global_priv;
-
SET sql_mode='';
DROP PROCEDURE IF EXISTS mysql.AddGeometryColumn;
diff --git a/mysql-test/main/upgrade_MDEV-23102-2.test b/mysql-test/main/upgrade_MDEV-23102-2.test
index 5ce7682fea6..f2d7ac578e0 100644
--- a/mysql-test/main/upgrade_MDEV-23102-2.test
+++ b/mysql-test/main/upgrade_MDEV-23102-2.test
@@ -75,7 +75,6 @@ CREATE DEFINER='superuser'@'localhost' SQL SECURITY DEFINER VIEW IF NOT EXISTS u
CAST(IFNULL(JSON_VALUE(Priv, '$.max_statement_time'), 0.0) AS DECIMAL(12,6)) AS max_statement_time
FROM global_priv;
-
DROP PROCEDURE IF EXISTS mysql.AddGeometryColumn;
DROP PROCEDURE IF EXISTS mysql.DropGeometryColumn;
diff --git a/mysql-test/main/upgrade_mdev_24363.result b/mysql-test/main/upgrade_mdev_24363.result
new file mode 100644
index 00000000000..d507193088e
--- /dev/null
+++ b/mysql-test/main/upgrade_mdev_24363.result
@@ -0,0 +1,129 @@
+#
+# MDEV-24363 10.4: change definition of mysql.user view
+# to reflect the correct value from mysql.global_priv
+# This change was added because the mysql.user view definition
+# was already changed when mariadb.sys was introduced, so it is
+# reasonable to change it again to fix MDEV-24363
+#
+# Test that mysql.user password_expired column
+# shows the right value as in mysql.global_priv
+#
+create user gigi@localhost;
+show create user gigi@localhost;
+CREATE USER for gigi@localhost
+CREATE USER `gigi`@`localhost`
+select password_expired from mysql.user where user='gigi' and host='localhost';
+password_expired
+N
+alter user gigi@localhost password expire;
+show create user gigi@localhost;
+CREATE USER for gigi@localhost
+CREATE USER `gigi`@`localhost` PASSWORD EXPIRE
+select password_expired from mysql.user where user='gigi' and host='localhost';
+password_expired
+Y
+drop user gigi@localhost;
+#
+# Test that upgrades from 10.4+ versions before this mdev
+# correctly drop and recreate the mysql.user view
+#
+use mysql;
+set @def = (select view_definition from information_schema.views where table_name='user' and table_schema='mysql');
+set @trimmed_def = (select trim(trailing 'from `mysql`.`global_priv`' from @def));
+set @newdef = (select concat(@trimmed_def, ", 'N' AS password_expired from mysql.global_priv"));
+set @pos = (select instr(@newdef, 'password_expired'));
+create or replace view user as select `mysql`.`global_priv`.`Host` AS `Host`,`mysql`.`global_priv`.`User` AS `User`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.plugin') in ('mysql_native_password','mysql_old_password'),ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.authentication_string'),''),'') AS `Password`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 1,'Y','N') AS `Select_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 2,'Y','N') AS `Insert_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 4,'Y','N') AS `Update_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 8,'Y','N') AS `Delete_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 16,'Y','N') AS `Create_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 32,'Y','N') AS `Drop_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 64,'Y','N') AS `Reload_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 128,'Y','N') AS `Shutdown_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 256,'Y','N') AS `Process_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 512,'Y','N') AS `File_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 1024,'Y','N') AS `Grant_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 2048,'Y','N') AS `References_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 4096,'Y','N') AS `Index_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 8192,'Y','N') AS `Alter_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 16384,'Y','N') AS `Show_db_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 32768,'Y','N') AS `Super_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 65536,'Y','N') AS `Create_tmp_table_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 131072,'Y','N') AS `Lock_tables_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 262144,'Y','N') AS `Execute_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 524288,'Y','N') AS `Repl_slave_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 1048576,'Y','N') AS `Repl_client_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 2097152,'Y','N') AS `Create_view_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 4194304,'Y','N') AS `Show_view_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 8388608,'Y','N') AS `Create_routine_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 16777216,'Y','N') AS `Alter_routine_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 33554432,'Y','N') AS `Create_user_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 67108864,'Y','N') AS `Event_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 134217728,'Y','N') AS `Trigger_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 268435456,'Y','N') AS `Create_tablespace_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 536870912,'Y','N') AS `Delete_history_priv`,elt(ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.ssl_type'),0) + 1,'','ANY','X509','SPECIFIED') AS `ssl_type`,ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.ssl_cipher'),'') AS `ssl_cipher`,ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.x509_issuer'),'') AS `x509_issuer`,ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.x509_subject'),'') AS `x509_subject`,cast(ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.max_questions'),0) as unsigned) AS 
`max_questions`,cast(ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.max_updates'),0) as unsigned) AS `max_updates`,cast(ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.max_connections'),0) as unsigned) AS `max_connections`,cast(ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.max_user_connections'),0) as signed) AS `max_user_connections`,ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.plugin'),'') AS `plugin`,ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.authentication_string'),'') AS `authentication_string`,if(ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.password_last_changed'),1) = 0,'Y','N') AS `abcsword_expired`,elt(ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.is_role'),0) + 1,'N','Y') AS `is_role`,ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.default_role'),'') AS `default_role`,cast(ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.max_statement_time'),0.0) as decimal(12,6)) AS `max_statement_time` , 'N' AS password_expired from mysql.global_priv;;
+create user gigi@localhost;
+show create user gigi@localhost;
+CREATE USER for gigi@localhost
+CREATE USER `gigi`@`localhost`
+select password_expired from mysql.user where user='gigi' and host='localhost';
+password_expired
+N
+alter user gigi@localhost password expire;
+show create user gigi@localhost;
+CREATE USER for gigi@localhost
+CREATE USER `gigi`@`localhost` PASSWORD EXPIRE
+select password_expired from mysql.user where user='gigi' and host='localhost';
+password_expired
+N
+drop user gigi@localhost;
+# Run mysql_upgrade
+Phase 1/7: Checking and upgrading mysql database
+Processing databases
+mysql
+mysql.column_stats OK
+mysql.columns_priv OK
+mysql.db OK
+mysql.event OK
+mysql.func OK
+mysql.global_priv OK
+mysql.gtid_slave_pos OK
+mysql.help_category OK
+mysql.help_keyword OK
+mysql.help_relation OK
+mysql.help_topic OK
+mysql.index_stats OK
+mysql.innodb_index_stats
+Error : Unknown storage engine 'InnoDB'
+error : Corrupt
+mysql.innodb_table_stats
+Error : Unknown storage engine 'InnoDB'
+error : Corrupt
+mysql.plugin OK
+mysql.proc OK
+mysql.procs_priv OK
+mysql.proxies_priv OK
+mysql.roles_mapping OK
+mysql.servers OK
+mysql.table_stats OK
+mysql.tables_priv OK
+mysql.time_zone OK
+mysql.time_zone_leap_second OK
+mysql.time_zone_name OK
+mysql.time_zone_transition OK
+mysql.time_zone_transition_type OK
+mysql.transaction_registry
+Error : Unknown storage engine 'InnoDB'
+error : Corrupt
+
+Repairing tables
+mysql.innodb_index_stats
+Error : Unknown storage engine 'InnoDB'
+error : Corrupt
+mysql.innodb_table_stats
+Error : Unknown storage engine 'InnoDB'
+error : Corrupt
+mysql.transaction_registry
+Error : Unknown storage engine 'InnoDB'
+error : Corrupt
+Phase 2/7: Installing used storage engines... Skipped
+Phase 3/7: Fixing views
+mysql.user OK
+Phase 4/7: Running 'mysql_fix_privilege_tables'
+Phase 5/7: Fixing table and database names
+Phase 6/7: Checking and upgrading tables
+Processing databases
+information_schema
+mtr
+mtr.global_suppressions OK
+mtr.test_suppressions OK
+performance_schema
+test
+Phase 7/7: Running 'FLUSH PRIVILEGES'
+OK
+create user gigi@localhost;
+show create user gigi@localhost;
+CREATE USER for gigi@localhost
+CREATE USER `gigi`@`localhost`
+select password_expired from mysql.user where user='gigi' and host='localhost';
+password_expired
+N
+alter user gigi@localhost password expire;
+show create user gigi@localhost;
+CREATE USER for gigi@localhost
+CREATE USER `gigi`@`localhost` PASSWORD EXPIRE
+select password_expired from mysql.user where user='gigi' and host='localhost';
+password_expired
+Y
+drop user gigi@localhost;
diff --git a/mysql-test/main/upgrade_mdev_24363.test b/mysql-test/main/upgrade_mdev_24363.test
new file mode 100644
index 00000000000..cdb49037a40
--- /dev/null
+++ b/mysql-test/main/upgrade_mdev_24363.test
@@ -0,0 +1,71 @@
+--echo #
+--echo # MDEV-24363 10.4: change definition of mysql.user view
+--echo # to reflect the correct value from mysql.global_priv
+--echo # This change was added because the mysql.user view definition
+--echo # was already changed when mariadb.sys was introduced, so it is
+--echo # reasonable to change it again to fix MDEV-24363
+
+--echo #
+--echo # Test that mysql.user password_expired column
+--echo # shows the right value as in mysql.global_priv
+--echo #
+
+create user gigi@localhost;
+show create user gigi@localhost;
+select password_expired from mysql.user where user='gigi' and host='localhost';
+
+alter user gigi@localhost password expire;
+show create user gigi@localhost;
+select password_expired from mysql.user where user='gigi' and host='localhost';
+
+drop user gigi@localhost;
+
+--echo #
+--echo # Test that upgrades from 10.4+ versions before this mdev
+--echo # correctly drop and recreate the mysql.user view
+--echo #
+
+--source include/mysql_upgrade_preparation.inc
+
+use mysql;
+
+# Use string operations to replace the password_expired definition
+# with "'N' AS password_expired", so this test does not have to list
+# all the fields of the user view yet again
+set @def = (select view_definition from information_schema.views where table_name='user' and table_schema='mysql');
+set @trimmed_def = (select trim(trailing 'from `mysql`.`global_priv`' from @def));
+set @newdef = (select concat(@trimmed_def, ", 'N' AS password_expired from mysql.global_priv"));
+set @pos = (select instr(@newdef, 'password_expired'));
+let $viewdef = `select insert(@newdef, @pos, 3, 'abc')`;
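+# insert(@newdef, @pos, 3, 'abc') renames the first occurrence of the
+# password_expired alias (the original column) to abcsword_expired, so the
+# only column named password_expired left in the view is the literal 'N'
+# appended above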
+
+--eval create or replace view user as $viewdef;
+
+create user gigi@localhost;
+show create user gigi@localhost;
+select password_expired from mysql.user where user='gigi' and host='localhost';
+
+# The password is now expired, but the replaced mysql.user view still reports password_expired as 'N'
+alter user gigi@localhost password expire;
+show create user gigi@localhost;
+select password_expired from mysql.user where user='gigi' and host='localhost';
+
+drop user gigi@localhost;
+
+--echo # Run mysql_upgrade
+--exec $MYSQL_UPGRADE 2>&1
+let $MYSQLD_DATADIR= `select @@datadir`;
+--file_exists $MYSQLD_DATADIR/mysql_upgrade_info
+--remove_file $MYSQLD_DATADIR/mysql_upgrade_info
+
+create user gigi@localhost;
+show create user gigi@localhost;
+select password_expired from mysql.user where user='gigi' and host='localhost';
+
+# The mysql.user view should have been recreated by mysql_upgrade, so
+# password_expired should now show 'Y'
+alter user gigi@localhost password expire;
+show create user gigi@localhost;
+select password_expired from mysql.user where user='gigi' and host='localhost';
+
+drop user gigi@localhost;
+
diff --git a/mysql-test/main/user_limits.result b/mysql-test/main/user_limits.result
index ffb8bb204a6..acb34754caa 100644
--- a/mysql-test/main/user_limits.result
+++ b/mysql-test/main/user_limits.result
@@ -186,3 +186,30 @@ connection default;
drop user mysqltest_1@localhost;
drop table t1;
set global max_user_connections= @my_max_user_connections;
+#
+# End of 10.1 tests
+#
+#
+# MDEV-17852 Altered connection limits for user have no effect
+#
+create user foo@'%' with max_user_connections 1;
+connect con1,localhost,foo;
+select current_user();
+current_user()
+foo@%
+connect(localhost,foo,,test,MYSQL_PORT,MYSQL_SOCK);
+connect con2,localhost,foo;
+ERROR 42000: User 'foo' has exceeded the 'max_user_connections' resource (current value: 1)
+connection default;
+alter user foo with max_user_connections 2;
+connect con3,localhost,foo;
+select current_user();
+current_user()
+foo@%
+disconnect con3;
+disconnect con1;
+connection default;
+drop user foo@'%';
+#
+# End of 10.2 tests
+#
diff --git a/mysql-test/main/user_limits.test b/mysql-test/main/user_limits.test
index ebb4fd4fb88..36524febd8d 100644
--- a/mysql-test/main/user_limits.test
+++ b/mysql-test/main/user_limits.test
@@ -216,3 +216,29 @@ drop table t1;
--source include/wait_until_count_sessions.inc
set global max_user_connections= @my_max_user_connections;
+
+--echo #
+--echo # End of 10.1 tests
+--echo #
+
+--echo #
+--echo # MDEV-17852 Altered connection limits for user have no effect
+--echo #
+create user foo@'%' with max_user_connections 1;
+--connect con1,localhost,foo
+select current_user();
+--replace_result $MASTER_MYPORT MYSQL_PORT $MASTER_MYSOCK MYSQL_SOCK
+--error ER_USER_LIMIT_REACHED
+--connect con2,localhost,foo
+--connection default
+alter user foo with max_user_connections 2;
+--connect con3,localhost,foo
+select current_user();
+--disconnect con3
+--disconnect con1
+--connection default
+drop user foo@'%';
+
+--echo #
+--echo # End of 10.2 tests
+--echo #
diff --git a/mysql-test/main/userstat.result b/mysql-test/main/userstat.result
index 6ba3d0b7811..9152f602304 100644
--- a/mysql-test/main/userstat.result
+++ b/mysql-test/main/userstat.result
@@ -1,4 +1,3 @@
-DROP TABLE IF EXISTS t1;
select variable_value from information_schema.global_status where variable_name="handler_read_key" into @global_read_key;
Warnings:
Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
@@ -237,3 +236,17 @@ select @@in_transaction;
0
drop table t1;
set @@global.general_log=@save_general_log;
+#
+# MDEV-25242 Server crashes in check_grant upon invoking function with userstat enabled
+#
+create function f() returns int return (select 1 from performance_schema.threads);
+set global userstat= 1;
+select f() from information_schema.table_statistics;
+ERROR 21000: Subquery returns more than 1 row
+select f() from information_schema.index_statistics;
+ERROR 21000: Subquery returns more than 1 row
+set global userstat= 0;
+drop function f;
+#
+# End of 10.2 tests
+#
diff --git a/mysql-test/main/userstat.test b/mysql-test/main/userstat.test
index 547138cfeaa..6d486810db1 100644
--- a/mysql-test/main/userstat.test
+++ b/mysql-test/main/userstat.test
@@ -5,10 +5,7 @@
-- source include/have_innodb.inc
-- source include/have_log_bin.inc
-
---disable_warnings
-DROP TABLE IF EXISTS t1;
---enable_warnings
+-- source include/have_perfschema.inc
select variable_value from information_schema.global_status where variable_name="handler_read_key" into @global_read_key;
show columns from information_schema.client_statistics;
@@ -115,5 +112,20 @@ set @@autocommit=1;
select @@in_transaction;
drop table t1;
-# Cleanup
set @@global.general_log=@save_general_log;
+
+--echo #
+--echo # MDEV-25242 Server crashes in check_grant upon invoking function with userstat enabled
+--echo #
+create function f() returns int return (select 1 from performance_schema.threads);
+set global userstat= 1;
+--error ER_SUBQUERY_NO_1_ROW
+select f() from information_schema.table_statistics;
+--error ER_SUBQUERY_NO_1_ROW
+select f() from information_schema.index_statistics;
+set global userstat= 0;
+drop function f;
+
+--echo #
+--echo # End of 10.2 tests
+--echo #
diff --git a/mysql-test/main/view.result b/mysql-test/main/view.result
index 5718fed4464..de4be4efda5 100644
--- a/mysql-test/main/view.result
+++ b/mysql-test/main/view.result
@@ -6723,6 +6723,72 @@ DROP PROCEDURE sp;
DROP VIEW v1;
DROP TABLE t1;
#
+# MDEV-24314: create view with derived table without default database
+#
+drop database test;
+create database db1;
+create table db1.t1 (a int);
+insert into db1.t1 values (3),(7),(1);
+create view db1.v1 as select * from (select * from db1.t1) t;
+show create view db1.v1;
+View Create View character_set_client collation_connection
+v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `db1`.`v1` AS select `t`.`a` AS `a` from (select `db1`.`t1`.`a` AS `a` from `db1`.`t1`) `t` latin1 latin1_swedish_ci
+select * from db1.v1;
+a
+3
+7
+1
+drop view db1.v1;
+prepare stmt from "
+create view db1.v1 as select * from (select * from db1.t1) t;
+";
+execute stmt;
+deallocate prepare stmt;
+show create view db1.v1;
+View Create View character_set_client collation_connection
+v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `db1`.`v1` AS select `t`.`a` AS `a` from (select `db1`.`t1`.`a` AS `a` from `db1`.`t1`) `t` latin1 latin1_swedish_ci
+select * from db1.v1;
+a
+3
+7
+1
+drop view db1.v1;
+drop table db1.t1;
+drop database db1;
+create database test;
+use test;
+#
+# MDEV-16940: update of multi-table view returning error used in SP
+#
+CREATE TABLE t1 (a INT) ENGINE=MyISAM;
+INSERT INTO t1 VALUES (1), (2);
+CREATE TABLE t2 (b INT) ENGINE=MyISAM;
+INSERT INTO t2 VALUES (2), (3);
+CREATE VIEW v1 AS SELECT a, b FROM t1,t2;
+CREATE PROCEDURE sp1() UPDATE v1 SET a = 8, b = 9;
+CALL sp1;
+ERROR HY000: Can not modify more than one base table through a join view 'test.v1'
+CALL sp1;
+ERROR HY000: Can not modify more than one base table through a join view 'test.v1'
+DROP PROCEDURE sp1;
+DROP VIEW v1;
+DROP TABLE t1, t2;
+#
+# MDEV-23291: SUM column from a derived table returns invalid values
+#
+CREATE TABLE t1(a INT, b INT);
+INSERT INTO t1 VALUES (1,1), (2,2);
+CREATE view v1 AS
+SELECT a as x, (select x) as y, (select y) as z FROM t1;
+SELECT sum(z) FROM (SELECT a as x, (select x) as y, (select y) as z FROM t1) q;
+sum(z)
+3
+SELECT sum(z) FROM v1;
+sum(z)
+3
+DROP TABLE t1;
+DROP VIEW v1;
+#
# End of 10.2 tests
#
#
@@ -6758,3 +6824,19 @@ Drop table t1;
#
# End of 10.3 tests
#
+#
+# MDEV-25206: view specification contains unknown column reference
+#
+CREATE TABLE t1 (a int);
+INSERT INTO t1 VALUES (1),(2);
+CREATE TABLE t2 (b int);
+INSERT INTO t2 VALUES (2),(3);
+CREATE TABLE t3 (c int);
+CREATE VIEW v1 AS SELECT * FROM t1 JOIN t2 ON t1.x > t2.b;
+ERROR 42S22: Unknown column 't1.x' in 'on clause'
+INSERT INTO t3 SELECT * FROM t1 JOIN t2 ON t1.x > t2.b;
+ERROR 42S22: Unknown column 't1.x' in 'on clause'
+CREATE TABLE t4 AS SELECT * FROM t1 JOIN t2 ON t1.x > t2.b;
+ERROR 42S22: Unknown column 't1.x' in 'on clause'
+DROP TABLE t1,t2,t3;
+# End of 10.4 tests
diff --git a/mysql-test/main/view.test b/mysql-test/main/view.test
index cf77dd875f2..ca5456930ba 100644
--- a/mysql-test/main/view.test
+++ b/mysql-test/main/view.test
@@ -6433,6 +6433,74 @@ DROP VIEW v1;
DROP TABLE t1;
--echo #
+--echo # MDEV-24314: create view with derived table without default database
+--echo #
+
+drop database test;
+
+create database db1;
+create table db1.t1 (a int);
+insert into db1.t1 values (3),(7),(1);
+
+create view db1.v1 as select * from (select * from db1.t1) t;
+show create view db1.v1;
+select * from db1.v1;
+drop view db1.v1;
+
+prepare stmt from "
+create view db1.v1 as select * from (select * from db1.t1) t;
+";
+execute stmt;
+deallocate prepare stmt;
+show create view db1.v1;
+select * from db1.v1;
+drop view db1.v1;
+
+drop table db1.t1;
+drop database db1;
+
+create database test;
+use test;
+
+--echo #
+--echo # MDEV-16940: update of multi-table view returning error used in SP
+--echo #
+
+CREATE TABLE t1 (a INT) ENGINE=MyISAM;
+INSERT INTO t1 VALUES (1), (2);
+CREATE TABLE t2 (b INT) ENGINE=MyISAM;
+INSERT INTO t2 VALUES (2), (3);
+
+CREATE VIEW v1 AS SELECT a, b FROM t1,t2;
+
+CREATE PROCEDURE sp1() UPDATE v1 SET a = 8, b = 9;
+
+--error ER_VIEW_MULTIUPDATE
+CALL sp1;
+--error ER_VIEW_MULTIUPDATE
+CALL sp1;
+
+DROP PROCEDURE sp1;
+DROP VIEW v1;
+DROP TABLE t1, t2;
+
+--echo #
+--echo # MDEV-23291: SUM column from a derived table returns invalid values
+--echo #
+
+CREATE TABLE t1(a INT, b INT);
+INSERT INTO t1 VALUES (1,1), (2,2);
+
+CREATE view v1 AS
+SELECT a as x, (select x) as y, (select y) as z FROM t1;
+
+SELECT sum(z) FROM (SELECT a as x, (select x) as y, (select y) as z FROM t1) q;
+SELECT sum(z) FROM v1;
+
+DROP TABLE t1;
+DROP VIEW v1;
+
+--echo #
--echo # End of 10.2 tests
--echo #
@@ -6474,3 +6542,24 @@ Drop table t1;
--echo #
--echo # End of 10.3 tests
--echo #
+
+--echo #
+--echo # MDEV-25206: view specification contains unknown column reference
+--echo #
+
+CREATE TABLE t1 (a int);
+INSERT INTO t1 VALUES (1),(2);
+CREATE TABLE t2 (b int);
+INSERT INTO t2 VALUES (2),(3);
+CREATE TABLE t3 (c int);
+
+--error ER_BAD_FIELD_ERROR
+CREATE VIEW v1 AS SELECT * FROM t1 JOIN t2 ON t1.x > t2.b;
+--error ER_BAD_FIELD_ERROR
+INSERT INTO t3 SELECT * FROM t1 JOIN t2 ON t1.x > t2.b;
+--error ER_BAD_FIELD_ERROR
+CREATE TABLE t4 AS SELECT * FROM t1 JOIN t2 ON t1.x > t2.b;
+
+DROP TABLE t1,t2,t3;
+
+--echo # End of 10.4 tests
diff --git a/mysql-test/main/win.result b/mysql-test/main/win.result
index ed10568703c..acc3de96fe8 100644
--- a/mysql-test/main/win.result
+++ b/mysql-test/main/win.result
@@ -3866,6 +3866,32 @@ NULL
DROP VIEW v1;
DROP TABLE t1,t2;
#
+# MDEV-25032 Window functions without column references get removed from ORDER BY
+#
+create table t1 (id int, score double);
+insert into t1 values
+(1, 5),
+(1, 6),
+(1, 6),
+(1, 6),
+(1, 7),
+(1, 8.1),
+(1, 9),
+(1, 10);
+select id, row_number() over () rn
+from t1
+order by rn desc;
+id rn
+1 8
+1 7
+1 6
+1 5
+1 4
+1 3
+1 2
+1 1
+drop table t1;
+#
# End of 10.2 tests
#
#
diff --git a/mysql-test/main/win.test b/mysql-test/main/win.test
index f5ad5e8b778..007e608a0a1 100644
--- a/mysql-test/main/win.test
+++ b/mysql-test/main/win.test
@@ -2523,6 +2523,26 @@ DROP VIEW v1;
DROP TABLE t1,t2;
--echo #
+--echo # MDEV-25032 Window functions without column references get removed from ORDER BY
+--echo #
+
+create table t1 (id int, score double);
+insert into t1 values
+(1, 5),
+(1, 6),
+(1, 6),
+(1, 6),
+(1, 7),
+(1, 8.1),
+(1, 9),
+(1, 10);
+select id, row_number() over () rn
+from t1
+order by rn desc;
+
+drop table t1;
+
+--echo #
--echo # End of 10.2 tests
--echo #
diff --git a/mysql-test/mysql-test-run.pl b/mysql-test/mysql-test-run.pl
index 61195934c39..e4ecc910556 100755
--- a/mysql-test/mysql-test-run.pl
+++ b/mysql-test/mysql-test-run.pl
@@ -48,9 +48,7 @@ BEGIN {
"Could not find the lib/ directory \n";
exit(1);
}
-}
-BEGIN {
# Check backward compatibility support
# By setting the environment variable MTR_VERSION
# it's possible to use a previous version of
@@ -95,6 +93,7 @@ use My::Tee;
use My::Find;
use My::SysInfo;
use My::CoreDump;
+use My::Debugger;
use mtr_cases;
use mtr_report;
use mtr_match;
@@ -109,6 +108,9 @@ require "mtr_io.pl";
require "mtr_gprof.pl";
require "mtr_misc.pl";
+my $opt_valgrind;
+my $valgrind_reports= 0;
+
$SIG{INT}= sub { mtr_error("Got ^C signal"); };
$SIG{HUP}= sub { mtr_error("Hangup detected on controlling terminal"); };
@@ -262,28 +264,6 @@ our $opt_gcov;
our $opt_gprof;
our %gprof_dirs;
-our $glob_debugger= 0;
-our $opt_gdb;
-my $opt_rr;
-my $opt_rr_dir;
-my @rr_record_args;
-our $opt_client_gdb;
-my $opt_boot_gdb;
-my $opt_boot_rr;
-our $opt_dbx;
-our $opt_client_dbx;
-my $opt_boot_dbx;
-our $opt_ddd;
-our $opt_client_ddd;
-my $opt_boot_ddd;
-our $opt_manual_gdb;
-our $opt_manual_lldb;
-our $opt_manual_dbx;
-our $opt_manual_ddd;
-our $opt_manual_debug;
-our $opt_debugger;
-our $opt_client_debugger;
-
my $config; # The currently running config
my $current_config_name; # The currently running config file template
@@ -309,51 +289,30 @@ our $opt_report_times= 0;
my $opt_sleep;
-my $opt_testcase_timeout= $ENV{MTR_TESTCASE_TIMEOUT} || 15; # minutes
-my $opt_suite_timeout = $ENV{MTR_SUITE_TIMEOUT} || 360; # minutes
-my $opt_shutdown_timeout= $ENV{MTR_SHUTDOWN_TIMEOUT} || 10; # seconds
-my $opt_start_timeout = $ENV{MTR_START_TIMEOUT} || 180; # seconds
+our $opt_retry= 1;
+our $opt_retry_failure= env_or_val(MTR_RETRY_FAILURE => 2);
+our $opt_testcase_timeout= $ENV{MTR_TESTCASE_TIMEOUT} || 15; # minutes
+our $opt_suite_timeout = $ENV{MTR_SUITE_TIMEOUT} || 360; # minutes
+our $opt_shutdown_timeout= $ENV{MTR_SHUTDOWN_TIMEOUT} || 10; # seconds
+our $opt_start_timeout = $ENV{MTR_START_TIMEOUT} || 180; # seconds
sub suite_timeout { return $opt_suite_timeout * 60; };
my $opt_wait_all;
my $opt_user_args;
my $opt_repeat= 1;
-my $opt_retry= 1;
-my $opt_retry_failure= env_or_val(MTR_RETRY_FAILURE => 2);
my $opt_reorder= 1;
my $opt_force_restart= 0;
our $opt_user = "root";
-our $opt_valgrind= 0;
-my $opt_valgrind_mysqld= 0;
-my $opt_valgrind_mysqltest= 0;
-my @valgrind_args;
-my $opt_strace= 0;
-my $opt_stracer;
-my $opt_client_strace = 0;
-my @strace_args;
-my $opt_valgrind_path;
-my $valgrind_reports= 0;
-my $opt_callgrind;
my %mysqld_logs;
my $opt_debug_sync_timeout= 300; # Default timeout for WAIT_FOR actions.
my $warn_seconds = 60;
my $rebootstrap_re= '--innodb[-_](?:page[-_]size|checksum[-_]algorithm|undo[-_]tablespaces|log[-_]group[-_]home[-_]dir|data[-_]home[-_]dir)|data[-_]file[-_]path|force_rebootstrap';
-sub testcase_timeout ($) {
- my ($tinfo)= @_;
- if (exists $tinfo->{'case-timeout'}) {
- # Return test specific timeout if *longer* that the general timeout
- my $test_to= $tinfo->{'case-timeout'};
- $test_to*= 10 if $opt_valgrind;
- return $test_to * 60 if $test_to > $opt_testcase_timeout;
- }
- return $opt_testcase_timeout * 60;
-}
-
+sub testcase_timeout ($) { return $opt_testcase_timeout * 60; }
sub check_timeout ($) { return testcase_timeout($_[0]); }
our $opt_warnings= 1;
@@ -769,9 +728,13 @@ sub run_test_server ($$$) {
rename $log_file_name, $log_file_name.".failed";
}
- delete($result->{result});
- $result->{retries}= $retries+1;
- $result->write_test($sock, 'TESTCASE');
+ {
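+	  # local() confines these changes to this block: the copy written back
+	  # to the worker gets retries bumped and result cleared, while the entry
+	  # pushed to @$completed below keeps its original retries/result values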
+ local @$result{'retries', 'result'};
+ delete $result->{result};
+ $result->{retries}= $retries+1;
+ $result->write_test($sock, 'TESTCASE');
+ }
+ push(@$completed, $result);
next;
}
}
@@ -1006,7 +969,7 @@ sub run_worker ($) {
}
mark_time_used('restart');
my $valgrind_reports= 0;
- if ($opt_valgrind_mysqld) {
+ if ($opt_valgrind) {
$valgrind_reports= valgrind_exit_reports();
print $server "VALGREP\n" if $valgrind_reports;
}
@@ -1068,8 +1031,6 @@ sub print_global_resfile {
resfile_global("debug", $opt_debug ? 1 : 0);
resfile_global("gcov", $opt_gcov ? 1 : 0);
resfile_global("gprof", $opt_gprof ? 1 : 0);
- resfile_global("valgrind", $opt_valgrind ? 1 : 0);
- resfile_global("callgrind", $opt_callgrind ? 1 : 0);
resfile_global("mem", $opt_mem);
resfile_global("tmpdir", $opt_tmpdir);
resfile_global("vardir", $opt_vardir);
@@ -1159,30 +1120,6 @@ sub command_line_setup {
'debug' => \$opt_debug,
'debug-common' => \$opt_debug_common,
'debug-server' => \$opt_debug_server,
- 'gdb=s' => \$opt_gdb,
- 'rr' => \$opt_rr,
- 'rr-arg=s' => \@rr_record_args,
- 'rr-dir=s' => \$opt_rr_dir,
- 'client-gdb' => \$opt_client_gdb,
- 'manual-gdb' => \$opt_manual_gdb,
- 'manual-lldb' => \$opt_manual_lldb,
- 'boot-gdb' => \$opt_boot_gdb,
- 'boot-rr' => \$opt_boot_rr,
- 'manual-debug' => \$opt_manual_debug,
- 'ddd' => \$opt_ddd,
- 'client-ddd' => \$opt_client_ddd,
- 'manual-ddd' => \$opt_manual_ddd,
- 'boot-ddd' => \$opt_boot_ddd,
- 'dbx' => \$opt_dbx,
- 'client-dbx' => \$opt_client_dbx,
- 'manual-dbx' => \$opt_manual_dbx,
- 'debugger=s' => \$opt_debugger,
- 'boot-dbx' => \$opt_boot_dbx,
- 'client-debugger=s' => \$opt_client_debugger,
- 'strace' => \$opt_strace,
- 'strace-option=s' => \@strace_args,
- 'client-strace' => \$opt_client_strace,
- 'stracer=s' => \$opt_stracer,
'max-save-core=i' => \$opt_max_save_core,
'max-save-datadir=i' => \$opt_max_save_datadir,
'max-test-fail=i' => \$opt_max_test_fail,
@@ -1191,23 +1128,6 @@ sub command_line_setup {
# Coverage, profiling etc
'gcov' => \$opt_gcov,
'gprof' => \$opt_gprof,
- 'valgrind|valgrind-all' => \$opt_valgrind,
- 'valgrind-mysqltest' => \$opt_valgrind_mysqltest,
- 'valgrind-mysqld' => \$opt_valgrind_mysqld,
- 'valgrind-options=s' => sub {
- my ($opt, $value)= @_;
- # Deprecated option unless it's what we know pushbuild uses
- if ($value eq "--gen-suppressions=all --show-reachable=yes") {
- push(@valgrind_args, $_) for (split(' ', $value));
- return;
- }
- die("--valgrind-options=s is deprecated. Use ",
- "--valgrind-option=s, to be specified several",
- " times if necessary");
- },
- 'valgrind-option=s' => \@valgrind_args,
- 'valgrind-path=s' => \$opt_valgrind_path,
- 'callgrind' => \$opt_callgrind,
'debug-sync-timeout=i' => \$opt_debug_sync_timeout,
# Directories
@@ -1257,12 +1177,13 @@ sub command_line_setup {
# list-options is internal, not listed in help
'list-options' => \$opt_list_options,
'skip-test-list=s' => \@opt_skip_test_list,
- 'xml-report=s' => \$opt_xml_report
+ 'xml-report=s' => \$opt_xml_report,
+
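+    # debugger, valgrind, strace and rr options are now supplied by My::Debugger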
+ My::Debugger::options()
);
# fix options (that take an optional argument and *only* after = sign
- my %fixopt = ( '--gdb' => '--gdb=#' );
- @ARGV = map { $fixopt{$_} or $_ } @ARGV;
+ @ARGV = My::Debugger::fix_options(@ARGV);
GetOptions(%options) or usage("Can't read options");
usage("") if $opt_usage;
list_options(\%options) if $opt_list_options;
@@ -1587,39 +1508,6 @@ sub command_line_setup {
{
mtr_error("Can't use --extern with --embedded-server");
}
-
-
- if ($opt_gdb)
- {
- $opt_client_gdb= $opt_gdb;
- $opt_gdb= undef;
- }
-
- if ($opt_ddd)
- {
- $opt_client_ddd= $opt_ddd;
- $opt_ddd= undef;
- }
-
- if ($opt_dbx) {
- mtr_warning("Silently converting --dbx to --client-dbx in embedded mode");
- $opt_client_dbx= $opt_dbx;
- $opt_dbx= undef;
- }
-
- if ($opt_debugger)
- {
- $opt_client_debugger= $opt_debugger;
- $opt_debugger= undef;
- }
-
- if ( $opt_gdb || $opt_ddd || $opt_manual_gdb || $opt_manual_lldb ||
- $opt_manual_ddd || $opt_manual_debug || $opt_debugger || $opt_dbx ||
- $opt_manual_dbx)
- {
- mtr_error("You need to use the client debug options for the",
- "embedded server. Ex: --client-gdb");
- }
}
# --------------------------------------------------------------------------
@@ -1640,42 +1528,6 @@ sub command_line_setup {
}
# --------------------------------------------------------------------------
- # Check debug related options
- # --------------------------------------------------------------------------
- if ( $opt_gdb || $opt_client_gdb || $opt_ddd || $opt_client_ddd || $opt_rr ||
- $opt_manual_gdb || $opt_manual_lldb || $opt_manual_ddd ||
- $opt_manual_debug || $opt_dbx || $opt_client_dbx || $opt_manual_dbx ||
- $opt_debugger || $opt_client_debugger )
- {
- $ENV{ASAN_OPTIONS}= 'abort_on_error=1:'.($ENV{ASAN_OPTIONS} || '');
- if ( using_extern() )
- {
- mtr_error("Can't use --extern when using debugger");
- }
- # Indicate that we are using debugger
- $glob_debugger= 1;
- $opt_retry= 1;
- $opt_retry_failure= 1;
- # Set one week timeout (check-testcase timeout will be 1/10th)
- $opt_testcase_timeout= 7 * 24 * 60;
- $opt_suite_timeout= 7 * 24 * 60;
- # One day to shutdown
- $opt_shutdown_timeout= 24 * 60;
- # One day for PID file creation (this is given in seconds not minutes)
- $opt_start_timeout= 24 * 60 * 60;
- if ($opt_rr && open(my $fh, '<', '/proc/sys/kernel/perf_event_paranoid'))
- {
- my $perf_event_paranoid= <$fh>;
- close $fh;
- chomp $perf_event_paranoid;
- if ($perf_event_paranoid == 0)
- {
- mtr_error("rr requires kernel.perf_event_paranoid set to 1");
- }
- }
- }
-
- # --------------------------------------------------------------------------
# Modified behavior with --start options
# --------------------------------------------------------------------------
if ($opt_start or $opt_start_dirty or $opt_start_exit) {
@@ -1736,75 +1588,6 @@ sub command_line_setup {
"for option --testsuite-timeout")
if ($opt_suite_timeout <= 0);
- # --------------------------------------------------------------------------
- # Check valgrind arguments
- # --------------------------------------------------------------------------
- if ( $opt_valgrind or $opt_valgrind_path or @valgrind_args)
- {
- mtr_report("Turning on valgrind for all executables");
- $opt_valgrind= 1;
- $opt_valgrind_mysqld= 1;
- $opt_valgrind_mysqltest= 1;
- }
- elsif ( $opt_valgrind_mysqld )
- {
- mtr_report("Turning on valgrind for mysqld(s) only");
- $opt_valgrind= 1;
- }
- elsif ( $opt_valgrind_mysqltest )
- {
- mtr_report("Turning on valgrind for mysqltest and mysql_client_test only");
- $opt_valgrind= 1;
- }
-
- if ($opt_valgrind)
- {
- # Increase the timeouts when running with valgrind
- $opt_testcase_timeout*= 10;
- $opt_suite_timeout*= 6;
- $opt_start_timeout*= 10;
- $warn_seconds*= 10;
- }
-
- if ( $opt_callgrind )
- {
- mtr_report("Turning on valgrind with callgrind for mysqld(s)");
- $opt_valgrind= 1;
- $opt_valgrind_mysqld= 1;
-
- # Set special valgrind options unless options passed on command line
- push(@valgrind_args, "--trace-children=yes")
- unless @valgrind_args;
- unshift(@valgrind_args, "--tool=callgrind");
- }
-
- # default to --tool=memcheck
- if ($opt_valgrind && ! grep(/^--tool=/i, @valgrind_args))
- {
- # Set valgrind_option unless already defined
- push(@valgrind_args, ("--show-reachable=yes", "--leak-check=yes",
- "--num-callers=16"))
- unless @valgrind_args;
- unshift(@valgrind_args, "--tool=memcheck");
- }
-
- if ( $opt_valgrind )
- {
- # Make valgrind run in quiet mode so it only print errors
- push(@valgrind_args, "--quiet" );
-
- push(@valgrind_args, "--suppressions=${glob_mysql_test_dir}/valgrind.supp")
- if -f "$glob_mysql_test_dir/valgrind.supp";
-
- mtr_report("Running valgrind with options \"",
- join(" ", @valgrind_args), "\"");
- }
-
- if (@strace_args || $opt_stracer)
- {
- $opt_strace=1;
- }
-
if ($opt_debug_common)
{
$opt_debug= 1;
@@ -1948,7 +1731,6 @@ sub collect_mysqld_features {
}
-
sub collect_mysqld_features_from_running_server ()
{
my $mysql= mtr_exe_exists("$path_client_bindir/mysql");
@@ -2019,21 +1801,6 @@ sub executable_setup () {
$exe_patch='patch' if `patch -v`;
- #
- # Check if libtool is available in this distribution/clone
- # we need it when valgrinding or debugging non installed binary
- # Otherwise valgrind will valgrind the libtool wrapper or bash
- # and gdb will not find the real executable to debug
- #
- if ( -x "../libtool")
- {
- $exe_libtool= "../libtool";
- if ($opt_valgrind or $glob_debugger or $opt_strace)
- {
- mtr_report("Using \"$exe_libtool\" when running valgrind, strace or debugger");
- }
- }
-
# Look for the client binaries
$exe_mysqladmin= mtr_exe_exists("$path_client_bindir/mysqladmin");
$exe_mysql= mtr_exe_exists("$path_client_bindir/mysql");
@@ -2158,9 +1925,6 @@ sub mysql_client_test_arguments(){
my $args;
mtr_init_args(\$args);
- if ( $opt_valgrind_mysqltest ) {
- valgrind_arguments($args, \$exe);
- }
mtr_add_arg($args, "--defaults-file=%s", $path_config_file);
mtr_add_arg($args, "--testcase");
mtr_add_arg($args, "--vardir=$opt_vardir");
@@ -2206,6 +1970,8 @@ sub environment_setup {
umask(022);
+ $ENV{'USE_RUNNING_SERVER'}= using_extern();
+
my @ld_library_paths;
if ($path_client_libdir)
@@ -2236,30 +2002,12 @@ sub environment_setup {
}
}
- # --------------------------------------------------------------------------
- # Valgrind need to be run with debug libraries otherwise it's almost
- # impossible to add correct supressions, that means if "/usr/lib/debug"
- # is available, it should be added to
- # LD_LIBRARY_PATH
- #
- # But pthread is broken in libc6-dbg on Debian <= 3.1 (see Debian
- # bug 399035, http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=399035),
- # so don't change LD_LIBRARY_PATH on that platform.
- # --------------------------------------------------------------------------
- my $debug_libraries_path= "/usr/lib/debug";
- my $deb_version;
- if ( $opt_valgrind and -d $debug_libraries_path and
- (! -e '/etc/debian_version' or
- ($deb_version=
- mtr_grab_file('/etc/debian_version')) !~ /^[0-9]+\.[0-9]$/ or
- $deb_version > 3.1 ) )
- {
- push(@ld_library_paths, $debug_libraries_path);
- }
-
$ENV{'LD_LIBRARY_PATH'}= join(":", @ld_library_paths,
$ENV{'LD_LIBRARY_PATH'} ?
split(':', $ENV{'LD_LIBRARY_PATH'}) : ());
+
+ My::Debugger::pre_setup();
+
mtr_debug("LD_LIBRARY_PATH: $ENV{'LD_LIBRARY_PATH'}");
$ENV{'DYLD_LIBRARY_PATH'}= join(":", @ld_library_paths,
@@ -2297,7 +2045,6 @@ sub environment_setup {
$ENV{'OPENSSL_CONF'}= $mysqld_variables{'version-ssl-library'} gt 'OpenSSL 1.1.1'
? "$glob_mysql_test_dir/lib/openssl.cnf" : '/dev/null';
- $ENV{'USE_RUNNING_SERVER'}= using_extern();
$ENV{'MYSQL_TEST_DIR'}= $glob_mysql_test_dir;
$ENV{'DEFAULT_MASTER_PORT'}= $mysqld_variables{'port'};
$ENV{'MYSQL_TMP_DIR'}= $opt_tmpdir;
@@ -2466,10 +2213,6 @@ sub environment_setup {
$ENV{INNOBACKUPEX}= "$exe_mariabackup --innobackupex";
- # Create an environment variable to make it possible
- # to detect that valgrind is being used from test cases
- $ENV{'VALGRIND_TEST'}= $opt_valgrind;
-
# Add dir of this perl to aid mysqltest in finding perl
my $perldir= dirname($^X);
my $pathsep= ":";
@@ -3222,6 +2965,7 @@ sub mysql_install_db {
mtr_add_arg($args, "--tmpdir=%s", "$opt_vardir/tmp/");
mtr_add_arg($args, "--core-file");
mtr_add_arg($args, "--console");
+ mtr_add_arg($args, "--character-set-server=latin1");
if ( $opt_debug )
{
@@ -3272,25 +3016,7 @@ sub mysql_install_db {
if (! -e $bootstrap_sql_file)
{
- if ($opt_boot_gdb) {
- gdb_arguments(\$args, \$exe_mysqld_bootstrap, $mysqld->name(),
- $bootstrap_sql_file);
- }
- if ($opt_boot_dbx) {
- dbx_arguments(\$args, \$exe_mysqld_bootstrap, $mysqld->name(),
- $bootstrap_sql_file);
- }
- if ($opt_boot_ddd) {
- ddd_arguments(\$args, \$exe_mysqld_bootstrap, $mysqld->name(),
- $bootstrap_sql_file);
- }
- if ($opt_boot_rr) {
- $args= ["record", @rr_record_args, $exe_mysqld_bootstrap, @$args];
- $exe_mysqld_bootstrap= "rr";
- my $rr_dir= $opt_rr_dir ? $opt_rr_dir : "$opt_vardir/rr.boot";
- $ENV{'_RR_TRACE_DIR'}= $rr_dir;
- mkpath($rr_dir);
- }
+ My::Debugger::setup_boot_args(\$args, \$exe_mysqld_bootstrap, $bootstrap_sql_file);
my $path_sql= my_find_file($install_basedir,
["mysql", "sql/share", "share/mariadb",
@@ -4649,7 +4375,13 @@ sub extract_warning_lines ($$) {
qr/InnoDB: Table .*mysql.*innodb_table_stats.* not found./,
qr/InnoDB: User stopword table .* does not exist./,
qr/Dump thread [0-9]+ last sent to server [0-9]+ binlog file:pos .+/,
- qr/Detected table cache mutex contention at instance .* waits. Additional table cache instance cannot be activated: consider raising table_open_cache_instances. Number of active instances/
+ qr/Detected table cache mutex contention at instance .* waits. Additional table cache instance cannot be activated: consider raising table_open_cache_instances. Number of active instances/,
+
+ # for UBSAN
+ qr/decimal\.c.*: runtime error: signed integer overflow/,
+    # Suppress UBSAN member-call warnings from dynamically loaded objects
+ qr/runtime error: member call.*object.*'Handler_share'/,
+ qr/sql_type\.cc.* runtime error: member call.*object.* 'Type_collection'/,
);
my $matched_lines= [];
@@ -5034,7 +4766,7 @@ sub after_failure ($) {
sub report_failure_and_restart ($) {
my $tinfo= shift;
- if ($opt_valgrind_mysqld && ($tinfo->{'warnings'} || $tinfo->{'timeout'}) &&
+ if ($opt_valgrind && ($tinfo->{'warnings'} || $tinfo->{'timeout'}) &&
$opt_core_on_failure == 0)
{
# In these cases we may want valgrind report from normal termination
@@ -5164,12 +4896,6 @@ sub mysqld_arguments ($$$) {
# Check if "extra_opt" contains --log-bin
my $skip_binlog= not grep /^--(loose-)?log-bin/, @$extra_opts;
- # Indicate to mysqld it will be debugged in debugger
- if ( $glob_debugger )
- {
- mtr_add_arg($args, "--gdb");
- }
-
my $found_skip_core= 0;
foreach my $arg ( @$extra_opts )
{
@@ -5218,7 +4944,6 @@ sub mysqld_start ($$) {
mtr_verbose(My::Options::toStr("mysqld_start", @$extra_opts));
my $exe= find_mysqld($mysqld->value('basedir'));
- my $wait_for_pid_file= 1;
mtr_error("Internal error: mysqld should never be started for embedded")
if $opt_embedded_server;
@@ -5226,15 +4951,6 @@ sub mysqld_start ($$) {
my $args;
mtr_init_args(\$args);
- if ( $opt_valgrind_mysqld and not $opt_gdb and not $opt_manual_gdb )
- {
- valgrind_arguments($args, \$exe);
- }
- if ( $opt_strace)
- {
- strace_arguments($args, \$exe, $mysqld->name());
- }
-
mtr_add_arg($args, "--defaults-group-suffix=%s", $mysqld->after('mysqld'));
# Add any additional options from an in-test restart
@@ -5258,49 +4974,8 @@ sub mysqld_start ($$) {
# options from *.opt and *.combination files.
$ENV{'MYSQLD_LAST_CMD'}= "$exe @$args";
- if ( $opt_gdb || $opt_manual_gdb )
- {
- gdb_arguments(\$args, \$exe, $mysqld->name());
- }
- elsif ( $opt_manual_lldb )
- {
- lldb_arguments(\$args, \$exe, $mysqld->name());
- }
- elsif ( $opt_ddd || $opt_manual_ddd )
- {
- ddd_arguments(\$args, \$exe, $mysqld->name());
- }
- elsif ( $opt_dbx || $opt_manual_dbx ) {
- dbx_arguments(\$args, \$exe, $mysqld->name());
- }
- elsif ( $opt_debugger )
- {
- debugger_arguments(\$args, \$exe, $mysqld->name());
- }
- elsif ( $opt_manual_debug )
- {
- print "\nStart " .$mysqld->name()." in your debugger\n" .
- "dir: $glob_mysql_test_dir\n" .
- "exe: $exe\n" .
- "args: " . join(" ", @$args) . "\n\n" .
- "Waiting ....\n";
-
- # Indicate the exe should not be started
- $exe= undef;
- }
- elsif ( $opt_rr )
- {
- $args= ["record", @rr_record_args, "$exe", @$args];
- $exe= "rr";
- my $rr_dir= $opt_rr_dir ? $opt_rr_dir : "$opt_vardir/rr". $mysqld->after('mysqld');
- $ENV{'_RR_TRACE_DIR'}= $rr_dir;
- mkpath($rr_dir);
- }
- else
- {
- # Default to not wait until pid file has been created
- $wait_for_pid_file= 0;
- }
+ My::Debugger::setup_args(\$args, \$exe, $mysqld->name());
+ $ENV{'VALGRIND_TEST'}= $opt_valgrind = int(($exe || '') eq 'valgrind');
# Remove the old pidfile if any
unlink($mysqld->value('pid-file'));
@@ -5349,11 +5024,8 @@ sub mysqld_start ($$) {
mtr_verbose("Started $mysqld->{proc}");
}
- if ( $wait_for_pid_file &&
- !sleep_until_file_created($mysqld->value('pid-file'),
- $opt_start_timeout,
- $mysqld->{'proc'},
- $warn_seconds))
+ if (!sleep_until_file_created($mysqld->value('pid-file'),
+ $opt_start_timeout, $mysqld->{'proc'}, $warn_seconds))
{
my $mname= $mysqld->name();
mtr_error("Failed to start mysqld $mname with command $exe");
@@ -5800,13 +5472,6 @@ sub start_mysqltest ($) {
mtr_add_arg($args, "--sleep=%d", $opt_sleep);
}
- if ( $opt_valgrind )
- {
- # We are running server under valgrind, which causes some replication
- # test to be much slower, notable rpl_mdev6020. Increase timeout.
- mtr_add_arg($args, "--wait-for-pos-timeout=1500");
- }
-
if ( $opt_ssl )
{
# Turn on SSL for _all_ test cases if option --ssl was used
@@ -5839,31 +5504,6 @@ sub start_mysqltest ($) {
# ----------------------------------------------------------------------
$ENV{'MYSQL_TEST'}= mtr_args2str($exe_mysqltest, @$args);
- # ----------------------------------------------------------------------
- # Add arguments that should not go into the MYSQL_TEST env var
- # ----------------------------------------------------------------------
- if ( $opt_valgrind_mysqltest )
- {
- # Prefix the Valgrind options to the argument list.
- # We do this here, since we do not want to Valgrind the nested invocations
- # of mysqltest; that would mess up the stderr output causing test failure.
- my @args_saved = @$args;
- mtr_init_args(\$args);
- valgrind_arguments($args, \$exe);
- mtr_add_arg($args, "%s", $_) for @args_saved;
- }
-
- # ----------------------------------------------------------------------
- # Prefix the strace options to the argument list.
- # ----------------------------------------------------------------------
- if ( $opt_client_strace )
- {
- my @args_saved = @$args;
- mtr_init_args(\$args);
- strace_arguments($args, \$exe, "mysqltest");
- mtr_add_arg($args, "%s", $_) for @args_saved;
- }
-
if ($opt_force > 1)
{
mtr_add_arg($args, "--continue-on-error");
@@ -5899,21 +5539,7 @@ sub start_mysqltest ($) {
}
}
- if ( $opt_client_gdb )
- {
- gdb_arguments(\$args, \$exe, "client");
- }
- elsif ( $opt_client_ddd )
- {
- ddd_arguments(\$args, \$exe, "client");
- }
- if ( $opt_client_dbx ) {
- dbx_arguments(\$args, \$exe, "client");
- }
- elsif ( $opt_client_debugger )
- {
- debugger_arguments(\$args, \$exe, "client");
- }
+ My::Debugger::setup_client_args(\$args, \$exe);
my $proc= My::SafeProcess->new
(
@@ -5929,287 +5555,6 @@ sub start_mysqltest ($) {
}
#
-# Modify the exe and args so that program is run in gdb in xterm
-#
-sub gdb_arguments {
- my $args= shift;
- my $exe= shift;
- my $type= shift;
- my $input= shift;
-
- my $gdb_init_file= "$opt_vardir/tmp/gdbinit.$type";
-
- # Remove the old gdbinit file
- unlink($gdb_init_file);
-
- # Put $args into a single string
- $input = $input ? "< $input" : "";
-
- if ($type eq 'client') {
- mtr_tofile($gdb_init_file, "set args @$$args $input");
- } elsif ($opt_valgrind_mysqld) {
- my $v = $$exe;
- my $vargs = [];
- valgrind_arguments($vargs, \$v);
- mtr_tofile($gdb_init_file, <<EOF);
-shell @My::SafeProcess::safe_process_cmd --parent-pid=`pgrep -x gdb` -- $v --vgdb-error=0 @$vargs @$$args &
-shell sleep 1
-target remote | /usr/lib64/valgrind/../../bin/vgdb
-EOF
- } else {
- mtr_tofile($gdb_init_file,
- join("\n",
- "set args @$$args $input",
- split /;/, $opt_gdb || ""
- ));
- }
-
- if ( $opt_manual_gdb )
- {
- print "\nTo start gdb for $type, type in another window:\n";
- print "gdb -cd $glob_mysql_test_dir -x $gdb_init_file $$exe\n";
-
- # Indicate the exe should not be started
- $$exe= undef;
- return;
- }
-
- $$args= [];
- mtr_add_arg($$args, "-title");
- mtr_add_arg($$args, "$type");
- mtr_add_arg($$args, "-e");
-
- if ( $exe_libtool )
- {
- mtr_add_arg($$args, $exe_libtool);
- mtr_add_arg($$args, "--mode=execute");
- }
-
- mtr_add_arg($$args, "gdb");
- mtr_add_arg($$args, "-x");
- mtr_add_arg($$args, "$gdb_init_file");
- mtr_add_arg($$args, "$$exe");
-
- $$exe= "xterm";
-}
-
-#
-# Modify the exe and args so that program is run in lldb
-#
-sub lldb_arguments {
- my $args= shift;
- my $exe= shift;
- my $type= shift;
- my $input= shift;
-
- my $lldb_init_file= "$opt_vardir/tmp/lldbinit.$type";
- unlink($lldb_init_file);
-
- # Put $args into a single string
- my $str= join(" ", @$$args);
- $input = $input ? "< $input" : "";
-
- # write init file for mysqld or client
- mtr_tofile($lldb_init_file, "process launch --stop-at-entry -- $str $input\n");
-
- print "\nTo start lldb for $type, type in another window:\n";
- print "cd $glob_mysql_test_dir && lldb -s $lldb_init_file $$exe\n";
-
- # Indicate the exe should not be started
- $$exe= undef;
- return;
-}
-
-#
-# Modify the exe and args so that program is run in ddd
-#
-sub ddd_arguments {
- my $args= shift;
- my $exe= shift;
- my $type= shift;
- my $input= shift;
-
- my $gdb_init_file= "$opt_vardir/tmp/gdbinit.$type";
-
- # Remove the old gdbinit file
- unlink($gdb_init_file);
-
- # Put $args into a single string
- my $str= join(" ", @$$args);
- $input = $input ? "< $input" : "";
-
- # write init file for mysqld or client
- mtr_tofile($gdb_init_file, "file $$exe\nset args $str $input\n");
-
- if ( $opt_manual_ddd )
- {
- print "\nTo start ddd for $type, type in another window:\n";
- print "ddd -cd $glob_mysql_test_dir -x $gdb_init_file $$exe\n";
-
- # Indicate the exe should not be started
- $$exe= undef;
- return;
- }
-
- my $save_exe= $$exe;
- $$args= [];
- if ( $exe_libtool )
- {
- $$exe= $exe_libtool;
- mtr_add_arg($$args, "--mode=execute");
- mtr_add_arg($$args, "ddd");
- }
- else
- {
- $$exe= "ddd";
- }
- mtr_add_arg($$args, "--command=$gdb_init_file");
- mtr_add_arg($$args, "$save_exe");
-}
-
-
-#
-# Modify the exe and args so that program is run in dbx in xterm
-#
-sub dbx_arguments {
- my $args= shift;
- my $exe= shift;
- my $type= shift;
- my $input= shift;
-
- # Put $args into a single string
- my $str= join " ", @$$args;
- my $runline= $input ? "run $str < $input" : "run $str";
-
- if ( $opt_manual_dbx ) {
- print "\nTo start dbx for $type, type in another window:\n";
- print "cd $glob_mysql_test_dir; dbx -c \"stop in main; " .
- "$runline\" $$exe\n";
-
- # Indicate the exe should not be started
- $$exe= undef;
- return;
- }
-
- $$args= [];
- mtr_add_arg($$args, "-title");
- mtr_add_arg($$args, "$type");
- mtr_add_arg($$args, "-e");
-
- if ( $exe_libtool ) {
- mtr_add_arg($$args, $exe_libtool);
- mtr_add_arg($$args, "--mode=execute");
- }
-
- mtr_add_arg($$args, "dbx");
- mtr_add_arg($$args, "-c");
- mtr_add_arg($$args, "stop in main; $runline");
- mtr_add_arg($$args, "$$exe");
-
- $$exe= "xterm";
-}
-
-
-#
-# Modify the exe and args so that program is run in the selected debugger
-#
-sub debugger_arguments {
- my $args= shift;
- my $exe= shift;
- my $debugger= $opt_debugger || $opt_client_debugger;
-
- if ( $debugger =~ /vcexpress|vc|devenv/ )
- {
- # vc[express] /debugexe exe arg1 .. argn
-
- # Add name of the exe and /debugexe before args
- unshift(@$$args, "$$exe");
- unshift(@$$args, "/debugexe");
-
- # Set exe to debuggername
- $$exe= $debugger;
-
- }
- elsif ( $debugger =~ /windbg|vsjitdebugger/ )
- {
- # windbg exe arg1 .. argn
-
- # Add name of the exe before args
- unshift(@$$args, "$$exe");
-
- # Set exe to debuggername
- $$exe= $debugger;
-
- }
- else
- {
- mtr_error("Unknown argument \"$debugger\" passed to --debugger");
- }
-}
-
-#
-# Modify the exe and args so that program is run in valgrind
-#
-sub valgrind_arguments {
- my $args= shift;
- my $exe= shift;
-
- # Ensure the jemalloc works with mysqld
- if ($$exe =~ /mysqld/)
- {
- my %somalloc=(
- 'system jemalloc' => 'libjemalloc*',
- 'bundled jemalloc' => 'NONE'
- );
- my ($syn) = $somalloc{$mysqld_variables{'version-malloc-library'}};
- mtr_add_arg($args, '--soname-synonyms=somalloc=%s', $syn) if $syn;
- }
-
- # Add valgrind options, can be overridden by user
- mtr_add_arg($args, '%s', $_) for (@valgrind_args);
-
- mtr_add_arg($args, $$exe);
-
- $$exe= $opt_valgrind_path || "valgrind";
-
- if ($exe_libtool)
- {
- # Add "libtool --mode-execute" before the test to execute
- # if running in valgrind(to avoid valgrinding bash)
- unshift(@$args, "--mode=execute", $$exe);
- $$exe= $exe_libtool;
- }
-}
-
-#
-# Modify the exe and args so that program is run in strace
-#
-sub strace_arguments {
- my $args= shift;
- my $exe= shift;
- my $mysqld_name= shift;
- my $output= sprintf("%s/log/%s.strace", $path_vardir_trace, $mysqld_name);
-
- mtr_add_arg($args, "-f");
- mtr_add_arg($args, "-o%s", $output);
-
- # Add strace options
- mtr_add_arg($args, '%s', $_) for (@strace_args);
-
- mtr_add_arg($args, $$exe);
-
- $$exe= $opt_stracer || "strace";
-
- if ($exe_libtool)
- {
- # Add "libtool --mode-execute" before the test to execute
- # if running in valgrind(to avoid valgrinding bash)
- unshift(@$args, "--mode=execute", $$exe);
- $$exe= $exe_libtool;
- }
-}
-
-#
# Search server logs for valgrind reports printed at mysqld termination
#
sub valgrind_exit_reports() {
@@ -6290,7 +5635,7 @@ sub usage ($) {
local $"= ','; # for @DEFAULT_SUITES below
- print <<HERE;
+ print <<HERE . My::Debugger::help() . <<HERE;
$0 [ OPTIONS ] [ TESTCASE ]
@@ -6417,32 +5762,11 @@ Options to run test on running server
Options for debugging the product
- boot-dbx Start bootstrap server in dbx
- boot-ddd Start bootstrap server in ddd
- boot-gdb Start bootstrap server in gdb
- client-dbx Start mysqltest client in dbx
- client-ddd Start mysqltest client in ddd
- client-debugger=NAME Start mysqltest in the selected debugger
- client-gdb Start mysqltest client in gdb
- dbx Start the mysqld(s) in dbx
- ddd Start the mysqld(s) in ddd
debug Dump trace output for all servers and client programs
debug-common Same as debug, but sets 'd' debug flags to
"query,info,error,enter,exit"
debug-server Use debug version of server, but without turning on
tracing
- debugger=NAME Start mysqld in the selected debugger
- gdb[=gdb_arguments] Start the mysqld(s) in gdb
- manual-debug Let user manually start mysqld in debugger, before
- running test(s)
- manual-gdb Let user manually start mysqld in gdb, before running
- test(s)
- manual-ddd Let user manually start mysqld in ddd, before running
- test(s)
- manual-dbx Let user manually start mysqld in dbx, before running
- test(s)
- manual-lldb Let user manually start mysqld in lldb, before running
- test(s)
max-save-core Limit the number of core files saved (to avoid filling
up disks for heavily crashing server). Defaults to
$opt_max_save_core. Set its default with
@@ -6456,38 +5780,7 @@ Options for debugging the product
$opt_max_test_fail, set to 0 for no limit. Set
its default with MTR_MAX_TEST_FAIL
 core-in-failure Generate a core even if the server is run with valgrind
-
-Options for valgrind
-
- valgrind Run the "mysqltest" and "mysqld" executables using
- valgrind with default options
- valgrind-all Synonym for --valgrind
- valgrind-mysqltest Run the "mysqltest" and "mysql_client_test" executable
- with valgrind
- valgrind-mysqld Run the "mysqld" executable with valgrind
- valgrind-options=ARGS Deprecated, use --valgrind-option
- valgrind-option=ARGS Option to give valgrind, replaces default option(s),
- can be specified more then once
- valgrind-path=<EXE> Path to the valgrind executable
- callgrind Instruct valgrind to use callgrind
-
-Options for strace
-
- strace Run the "mysqld" executables using strace. Default
- options are -f -o 'vardir'/log/'mysqld-name'.strace.
- client-strace Trace the "mysqltest".
- strace-option=ARGS Option to give strace, appends to existing options.
- stracer=<EXE> Specify name and path to the trace program to use.
- Default is "strace". Example: $0 --stracer=ktrace.
-
-Options for rr (Record and Replay)
- rr Run the "mysqld" executables using rr. Default run
- option is "rr record mysqld mysqld_options"
- boot-rr Start bootstrap server in rr
- rr-arg=ARG Option to give rr record, can be specified more then once
- rr-dir=DIR The directory where rr recordings are stored. Defaults
- to 'vardir'/rr.0 (rr.boot for bootstrap instance and
- rr.1, ..., rr.N for slave instances).
+HERE
Misc options
user=USER User for connecting to mysqld(default: $opt_user)
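
The hunks above replace mysql-test-run.pl's inline gdb/ddd/dbx/lldb/valgrind/strace/rr handling with three calls into My::Debugger (setup_boot_args, setup_args, setup_client_args) plus My::Debugger::help() for the usage text. The module itself is not reproduced in this excerpt; the stub below only illustrates the interface implied by those call sites, and its internals (including the MTR_DEBUGGER_WRAPPER selector) are hypothetical, not the real implementation.

    # Illustrative stub only -- not the actual My::Debugger module.
    package My::Debugger;
    use strict;
    use warnings;

    # setup_args(\@args, \$exe, $name): may rewrite the argument list and the
    # executable so that mysqld is launched under a wrapper (gdb, rr, valgrind,
    # strace, ...), or set $$exe to undef for "manual" modes.
    sub setup_args {
      my ($args, $exe, $name) = @_;
      return unless $ENV{MTR_DEBUGGER_WRAPPER};   # hypothetical selector, for illustration
      unshift @$$args, '--', $$exe;               # wrapper -- original-exe original-args
      $$exe = $ENV{MTR_DEBUGGER_WRAPPER};
    }

    # Same pattern for the bootstrap mysqld and the mysqltest client.
    sub setup_boot_args   { my ($args, $exe, $input) = @_; setup_args($args, $exe, 'bootstrap'); }
    sub setup_client_args { my ($args, $exe) = @_;         setup_args($args, $exe, 'client'); }

    # help() returns the option text that usage() splices between its heredocs.
    sub help { return "  (debugger, valgrind, strace and rr options described here)\n"; }

    1;
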
diff --git a/mysql-test/std_data/galera_certs/INFORMATION b/mysql-test/std_data/galera_certs/INFORMATION
new file mode 100644
index 00000000000..439ba394e09
--- /dev/null
+++ b/mysql-test/std_data/galera_certs/INFORMATION
@@ -0,0 +1,2 @@
+WSREP certificates signed with root certificate.
+Password used is `galera`.
diff --git a/mysql-test/std_data/galera_certs/galera.1.crt b/mysql-test/std_data/galera_certs/galera.1.crt
new file mode 100644
index 00000000000..c1c1818a88c
--- /dev/null
+++ b/mysql-test/std_data/galera_certs/galera.1.crt
@@ -0,0 +1,25 @@
+-----BEGIN CERTIFICATE-----
+MIIEODCCAiACFG1AlRipIFaH2nn79vsiU6s8yxqwMA0GCSqGSIb3DQEBCwUAMFox
+CzAJBgNVBAYTAkZJMREwDwYDVQQIDAhIZWxzaW5raTERMA8GA1UEBwwISGVsc2lu
+a2kxDzANBgNVBAoMBkdhbGVyYTEUMBIGA1UEAwwLZ2FsZXJhLnJvb3QwHhcNMjEw
+MjA0MTMxOTU3WhcNMzAxMTA0MTMxOTU3WjBXMQswCQYDVQQGEwJGaTERMA8GA1UE
+CAwISGVsc2lua2kxETAPBgNVBAcMCEhlbHNpbmtpMQ8wDQYDVQQKDAZHYWxlcmEx
+ETAPBgNVBAMMCGdhbGVyYS4xMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
+AQEAq1/qI8oopqwSG6TWLSzMOpm5iXilzQ5bM0E440ZVrdPo/OZvAb99yofMFJi9
+uD1zUigHA+PXoJhm0HYTt5VsymByYwKCT8aYC6Sm6a2H82uuS7Ze8JUBdl4ymZCe
+f56fYmh3EVu0Kf+rk9uCQGouzwrDNuS+MWxsV+lxVoAA08F19yJdvxehBNlGopqc
+Sw6NNa9SISCqjg32oR2RuFzkifV/olPHGl1FSzyzJ/zO2CQYjzT8W+UA/EtnFFCo
+XVJzOKHIbzTNKmj/kkX6esBZ7ItmAdGaTlIV6A/OR/wcKxzZBopd9wFSm42x+Dxk
+eMPKS7OFhaOypl/PHo1LsSrzKwIDAQABMA0GCSqGSIb3DQEBCwUAA4ICAQApP88p
+5jaa26DhqBonGMBwgbnGzzXqrDlE6GX8Z5TffgOQjg7ZmMzdnS50iW5Jj9PcG3PL
+VpdxtKnyV/3qKJ40WpRvnUMcghyRB74h44y09Qh013uSpFR4ST7As1kAsRKYU7YP
+gc7Bc+rc3fjCOxqwRBIg/mosFCmW7UoogpeGNhuXFgl7ED3pjszAOjbLDxUkaQTj
+vbS9nWvtzE68STBdVTct91OIJPY2hNno8trwYqchQOG7wPOH/V+HzQ3jeLdE8AKw
+/PouspuE5RJmU9zcRzlKBKUsmjl+zD6nkyEzkfRO/JoDhBB0ReHsxaR+SU42K78n
+2H/qGjfhIcWsQVyIaRGqRTPZ6AhRX/04n8RTLHHkG/CLsgBcZnaOvVhraItUAEzC
+AQtD9vvF5uyzdmj0uu2TZNHJnbx+NXoNGJOJI2qUISLSdrVkS6qAHIdavypxDZZg
+4o5NZz+Jyc7Zq61LxLemKfD0isVsY91610A/1JwCy+Li99Mvng8gAJoP2NX/Cvmv
+i0QowP5uRRSL6YmiqRByer9yveSlxR03FvLeFSdftln3eEIyS9/kU74oJ+rOXPus
+fuB2ZNFHjmX7iXj3zf1kkpNCc03eaLY6P3+h2Opnqitz/XAT/eSWQ1huMKGm78ih
+C3Cd/yrwiA+AfxhYMJHl6CTpEWcJnqZQbPBjug==
+-----END CERTIFICATE-----
diff --git a/mysql-test/std_data/galera_certs/galera.1.csr b/mysql-test/std_data/galera_certs/galera.1.csr
new file mode 100644
index 00000000000..f0c396836f0
--- /dev/null
+++ b/mysql-test/std_data/galera_certs/galera.1.csr
@@ -0,0 +1,16 @@
+-----BEGIN CERTIFICATE REQUEST-----
+MIICnDCCAYQCAQAwVzELMAkGA1UEBhMCRmkxETAPBgNVBAgMCEhlbHNpbmtpMREw
+DwYDVQQHDAhIZWxzaW5raTEPMA0GA1UECgwGR2FsZXJhMREwDwYDVQQDDAhnYWxl
+cmEuMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKtf6iPKKKasEhuk
+1i0szDqZuYl4pc0OWzNBOONGVa3T6PzmbwG/fcqHzBSYvbg9c1IoBwPj16CYZtB2
+E7eVbMpgcmMCgk/GmAukpumth/Nrrku2XvCVAXZeMpmQnn+en2JodxFbtCn/q5Pb
+gkBqLs8KwzbkvjFsbFfpcVaAANPBdfciXb8XoQTZRqKanEsOjTWvUiEgqo4N9qEd
+kbhc5In1f6JTxxpdRUs8syf8ztgkGI80/FvlAPxLZxRQqF1SczihyG80zSpo/5JF
++nrAWeyLZgHRmk5SFegPzkf8HCsc2QaKXfcBUpuNsfg8ZHjDykuzhYWjsqZfzx6N
+S7Eq8ysCAwEAAaAAMA0GCSqGSIb3DQEBCwUAA4IBAQCUyo7S6TThiPiTRbMMu9Pu
+/YAK7mcxF3zG5rMzcf4fgaJAuUfE1Ct4y9eJ0k4gPevt37J2AhyTei6yS2ivESie
+exc/kztendR1PQmnRlICWa3ErXC1ZBJAVjaOx/S+Ttq5Tp4Bd/X2gvUb5JT+9Xbi
+NtxlnISh9cjO9BP7nfsCAbjqBhYT1hmYMlCDkTgHOPRpBQDQlRZ7e5jXDyzHaKzq
+yMfX0jo934oq2lkrV68q/9vmW0SrUU0X9GVDVRo8+4wTb1/dHQOcDaFO1LrsklaQ
+MqJkffv0tJB249+JkXHMzOZbfUTFn6jVJvMrSAQmOCIgXpswk0qmMM6ipEQkAlKW
+-----END CERTIFICATE REQUEST-----
diff --git a/mysql-test/std_data/galera_certs/galera.1.key b/mysql-test/std_data/galera_certs/galera.1.key
new file mode 100644
index 00000000000..adcd6a396a1
--- /dev/null
+++ b/mysql-test/std_data/galera_certs/galera.1.key
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEAq1/qI8oopqwSG6TWLSzMOpm5iXilzQ5bM0E440ZVrdPo/OZv
+Ab99yofMFJi9uD1zUigHA+PXoJhm0HYTt5VsymByYwKCT8aYC6Sm6a2H82uuS7Ze
+8JUBdl4ymZCef56fYmh3EVu0Kf+rk9uCQGouzwrDNuS+MWxsV+lxVoAA08F19yJd
+vxehBNlGopqcSw6NNa9SISCqjg32oR2RuFzkifV/olPHGl1FSzyzJ/zO2CQYjzT8
+W+UA/EtnFFCoXVJzOKHIbzTNKmj/kkX6esBZ7ItmAdGaTlIV6A/OR/wcKxzZBopd
+9wFSm42x+DxkeMPKS7OFhaOypl/PHo1LsSrzKwIDAQABAoIBABrfppLIL8m7L/e/
+yIo6/SevVYX2MmHOf4SxFJ+nNuZUVbPa2st2YN5ynxEXxqBumnfmqPr2LqkTXQQR
+kBP4zZ+KskVObmreJJem0TnRqYEFgMaEqaymYR1TtjGYmRJAKJRA93L0Y3M2kYxV
+Hr7FJ+P1txkTk7OiYfcDN2+uEPMjoIwPTZYqcpw8UggF3zMZosBH3tf4yk/+5Q52
+MilRRjmoOFJSs9617OdgLoXEwQ4sAvg9UecrNR/octMnBUXKq5vWT+L9ub0fxATV
+8U+GUiv2gBnHGikbsqfV/7hZZy+R1V6b/hFrpTlTHXhKpM56ownT85tcI2WNVVOR
+FkDFXDECgYEA1gkIXzQ1O4wuxEBZCwH3hpzT2qCDou/yA91pQN6sekdxJffz8VJs
+5MUmr53vBTD3j57l+iPfa1yApNYEeQDXmiervdLjC0ep3FqyK8qS/J9x86K2E9Bt
+R8ElYGEsYoT93pzM7txPEZo3awgHAqmlPQ9mhujBpsR9xBjYi1IrGDkCgYEAzPmj
+ezryhMqFosh9OK5tirjGw3T28p+ywIl7wS5/Le32HV6sGsva3UpZhb67SkpCDbpF
+ihDV7KHFQqOvZBNSFc8gPvBgFlv4k5IzbU2q1/nO/TzgSnp1sAwlZv4shsiLz7sv
+x2ZhR8gPfO7cTS4281rdlhUuAMe79W2FRwm9/oMCgYEAkPSjH9864i5pie5On97g
+JeHWtS2amWJKFQYB/7YPN+1kmyNXqit5pmJDdhLPS0PDlhg2hvd+m7aVRY3Qj6bb
+XgLaFIBb1krdpmgiXPggHklaIngjOj4hlMQhrSmCpuKNERQ+0tKQFkrMl4djQBFm
+4HiqplnCtVBEIOf22Dx5BTkCgYAQssmhsWSucr0+TKz+4B7mbTUsGSxBCceLLega
+DcqomDkznVHSAQd8faEbZzVk0PXenm0p2UNEOs2SJzmTootOYYhT+EsrpyRyCTgN
+UIV5gM1fDgWLq7xIskSdxlkkRdQ2AR7cVLfaHC8+00q5MumhG6bvohwUTjE+xkRU
+TDPhNwKBgQDKmYS+6sUiUim69J1dP/DNCs4fabbOi/uKaAk0HdN1abnxgRROzrPI
+0BsD84XrG8/e0JhRiKG3Doq18ejNjfWsuyliHPCoJrTIj4IUq7bFvVbdOD6BQHNw
+VzHa3UImlF1LthRqRMV5As+GOF0pRCHeXyihi6KBDEZBG/SKaCp9rw==
+-----END RSA PRIVATE KEY-----
diff --git a/mysql-test/std_data/galera_certs/galera.2.crt b/mysql-test/std_data/galera_certs/galera.2.crt
new file mode 100644
index 00000000000..2d7ebb2d533
--- /dev/null
+++ b/mysql-test/std_data/galera_certs/galera.2.crt
@@ -0,0 +1,25 @@
+-----BEGIN CERTIFICATE-----
+MIIEODCCAiACFG1AlRipIFaH2nn79vsiU6s8yxqxMA0GCSqGSIb3DQEBCwUAMFox
+CzAJBgNVBAYTAkZJMREwDwYDVQQIDAhIZWxzaW5raTERMA8GA1UEBwwISGVsc2lu
+a2kxDzANBgNVBAoMBkdhbGVyYTEUMBIGA1UEAwwLZ2FsZXJhLnJvb3QwHhcNMjEw
+MjA0MTMyMTMzWhcNMzAxMTA0MTMyMTMzWjBXMQswCQYDVQQGEwJGSTERMA8GA1UE
+CAwISGVsc2lua2kxETAPBgNVBAcMCEhlbHNpbmtpMQ8wDQYDVQQKDAZHYWxlcmEx
+ETAPBgNVBAMMCGdhbGVyYS4yMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
+AQEAq0DluHNB6VlRjMlQVhnABzB+wTAsC4DBQBAy/AzCPdEg67pZj1j1UKNitdfO
+/FLn6DCLoFhw/z5gFUpev3JzgHcbguOlf3AQA4p9zZn/R/g8fGJtUHolkYXT+V+j
++xUQ8dzdjelu6Xd0kpN9wigtKO4PUd3fzZ7QKen68zQPNEf+KFbSj/Dhk2iokt3N
+entl3MpLGJ+FsxPQwm7bagRdn66x7zAeRu4DifYh3i7lWkC+xE+bnB82BrzBHcMh
+N6uqdnKSdj078hRj/gcSJFMlOSaWCWoZdHQ7+3gp/bCi/dGywNxOgbsmuHznlUpE
+ELbhv6G0m6LzzB1NW7HFctyrNwIDAQABMA0GCSqGSIb3DQEBCwUAA4ICAQAwtXxJ
+8ZQw1jZKUo0TP1D17Jdu36x2Tl8YwB+WMGdEvQSuKAnUEL/k4zOB5WI1VlNbJFAF
+dgsbHXjYrVCbDEpmN15sYXZ4J2NfGTmHAz38gB+r2LDlWj/+5L+VK+hkwGdbcaPX
+cDxm5M66ZbTvCFfozlHRh68/vGKhLnT0Iof3DekP7vaPUlfUUZGFh75vxUW8TM/S
+Ii4Tdo5D/gV4J/fUX2VKMKrJIYKcUxk48AFT0acCfzs9Uc5f4YYJ0vMrtGeVR0gl
+QLavAk3OW7IVY7trVrb0+qKfVuaxFli2hZN58ug/fpSN4wEMP31UxZ8WihG3xJ4x
+9ona+VR023ltpJerLWgHZyvH/HR8QnrQpPJ7y+2XLdI01gIQpYDwHsBBa8EkGjvG
+ra5YB07xOxxR4Wfr7/7gZzMvBkRr0wG/96iAfIB/ILYRJX+93gyqaVHS4RZRRQxe
+fsOpYOy5wMfPIjQQ4/Zd35NH+Y/dQcYqV+GdbbardXtNbT0tqLQesT3boBpsTxA9
+fkA9RayzTKHGojTv8p/FHh6yusfwa9MMdNsbkikM0YoAOTQwrBe7S9sm17Z6HxWr
+A7QqZGxAzAqI6aRRoro6z0KLNjuuiJysCSBqHB3yvPn6bV84UEUreoXFIHINkF/L
+B9S5zL+uYnA3X/ozdSmayNpipA3uYqqhUVSG9g==
+-----END CERTIFICATE-----
diff --git a/mysql-test/std_data/galera_certs/galera.2.csr b/mysql-test/std_data/galera_certs/galera.2.csr
new file mode 100644
index 00000000000..0fba110b92a
--- /dev/null
+++ b/mysql-test/std_data/galera_certs/galera.2.csr
@@ -0,0 +1,16 @@
+-----BEGIN CERTIFICATE REQUEST-----
+MIICnDCCAYQCAQAwVzELMAkGA1UEBhMCRkkxETAPBgNVBAgMCEhlbHNpbmtpMREw
+DwYDVQQHDAhIZWxzaW5raTEPMA0GA1UECgwGR2FsZXJhMREwDwYDVQQDDAhnYWxl
+cmEuMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKtA5bhzQelZUYzJ
+UFYZwAcwfsEwLAuAwUAQMvwMwj3RIOu6WY9Y9VCjYrXXzvxS5+gwi6BYcP8+YBVK
+Xr9yc4B3G4LjpX9wEAOKfc2Z/0f4PHxibVB6JZGF0/lfo/sVEPHc3Y3pbul3dJKT
+fcIoLSjuD1Hd382e0Cnp+vM0DzRH/ihW0o/w4ZNoqJLdzXp7ZdzKSxifhbMT0MJu
+22oEXZ+use8wHkbuA4n2Id4u5VpAvsRPm5wfNga8wR3DITerqnZyknY9O/IUY/4H
+EiRTJTkmlglqGXR0O/t4Kf2wov3RssDcToG7Jrh855VKRBC24b+htJui88wdTVux
+xXLcqzcCAwEAAaAAMA0GCSqGSIb3DQEBCwUAA4IBAQB1Je87IIfWW4YTvf1evm4/
+ICxImyQ4T5m3IOPrv9dV/DdxIoNSEfeudjyOmdeXchV5XxlGD2a6JDW4Pmf/gMlU
+5E6ySf4yvI/heDM05GdG623Nye41hCAqh9AIBOvhZEFlQ8/eDZFBXT9nZ4PFUshv
+7v1KlMNKHq5E3Y2eet3d0wDzE0CYJfkc0yoYX/y1IGVM2Td7/YmsSTz8Xm1OvIfx
+hbLzbnYIv4OLfSda/ntFxUy32c8jRxusbbrL4NKE5+eO5Sro1JR/rxRW9DIarp02
+8fzUyf1WhYIGtP/N3ZiZ4jqCsRyj0QmuwIohk33pKtb0APIA+qy8a13QSLUCPuHf
+-----END CERTIFICATE REQUEST-----
diff --git a/mysql-test/std_data/galera_certs/galera.2.key b/mysql-test/std_data/galera_certs/galera.2.key
new file mode 100644
index 00000000000..5b9bf3d0eac
--- /dev/null
+++ b/mysql-test/std_data/galera_certs/galera.2.key
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEAq0DluHNB6VlRjMlQVhnABzB+wTAsC4DBQBAy/AzCPdEg67pZ
+j1j1UKNitdfO/FLn6DCLoFhw/z5gFUpev3JzgHcbguOlf3AQA4p9zZn/R/g8fGJt
+UHolkYXT+V+j+xUQ8dzdjelu6Xd0kpN9wigtKO4PUd3fzZ7QKen68zQPNEf+KFbS
+j/Dhk2iokt3Nentl3MpLGJ+FsxPQwm7bagRdn66x7zAeRu4DifYh3i7lWkC+xE+b
+nB82BrzBHcMhN6uqdnKSdj078hRj/gcSJFMlOSaWCWoZdHQ7+3gp/bCi/dGywNxO
+gbsmuHznlUpEELbhv6G0m6LzzB1NW7HFctyrNwIDAQABAoIBAQCAnv9qJ3bLkgAD
+43dpE3H8dFnfMxUBlrSOLxx73gFNeHJnWdDGLyQganZK6UlWjdYLt9pGleZYbjqw
+AulilM1XIR4SknPMYRhF8JBICW+IWFLlgO9lUDhDMeZhF4oLnGjbnuzwFvDsfIGb
+TRdY6d/xK8tpy2C6CJuDv25xlxoMQwJvSIViJd1qyCe4x3PDBvZ3TKMiWXfUGoSg
+75Yee6dIryCoWGACTjdiNdJpo99EIMJT8HIGQeBBJuIvSTsAgXMugDlnvUkq98LX
+XLJ+Lilx+enUb1WDbwZDMwJd0DlDVZyRVDJwlJRFUEuJvSRfMNKICEynmqJVc/36
+I7BzHNtRAoGBAOOGrH/r46oiSH6vE0VqgPIdlruLDZWoBIW0tVW1wb9E3NzExK53
+WEGy6FETsFGt3cYxtKd5Qy4rD5gfqYrfmDH/bmkK9p69pSjKcSgynABxCqM0DCth
+F3EGc5ZGDcA7Xar70NEP9COExFvPpi2bBq+8//OHNKWHe/aTYj3FuvyjAoGBAMCv
+ZrUR8NJfxw8Jqygc/L5BBW0gzh8ycHlQQPrpb4j01ncFcjTe1Tsc8BAkHYXZbS7o
+h+3JkgzQf7fHJmafKPO3esj2fZqdCoWBqhAf9Wk+9s33rTPo4OXdk22MdII7kV/e
+VpUKzdznKQmftleoJDVq6qBDMN9qQy8z5hVVv8xdAoGBAJEVSV3wzyWn4s5VWVaE
+SWEaGQnR64Got+mCh7b1xWvvv15PYpNVqsOKD7XTdjU/RxGglG/OVVZVQwZf+j6B
+wYzwMjltMkGa8HwISwu06eEmNABJqhDnQolh8ca7OP2BXYMwO8F0CNu1R3i7+l1O
+Y38gZ52kc5+xuwxKgvSc51U9AoGAKiq4aUvBzegT4eCVyjN9xAzqqRUSxpT9NC8x
+6TcIp4odHPLeKV/Sfhs5Fe8xXsdUM33DsW/5PECskoVMjAyso5k1j6ERn7JaSRk1
+JE46IIwc6roW91MxVFyHFOQ14wIyMC6x9+/jWUJlIqwG9WvgcKgzLFtH6LySc1QC
+OA0J6ikCgYAr7JEzFzckFvXNgdUiADnXztchwmjeV/CJZdaHhlVdHmtt0Z7PiWK9
+wRCKwA+wfTW1MeufP/t3l9MqdFx7MjxBbu2aVnd9BEDtGX6pch1pMa7CHyHayDiL
+UoD0lPrJ9hLftKkyMO2IL3kWlg5g3cpwVBzMKxNBynzQx7TQxUrAgA==
+-----END RSA PRIVATE KEY-----
diff --git a/mysql-test/std_data/galera_certs/galera.root.crt b/mysql-test/std_data/galera_certs/galera.root.crt
new file mode 100644
index 00000000000..e965d5a26d6
--- /dev/null
+++ b/mysql-test/std_data/galera_certs/galera.root.crt
@@ -0,0 +1,32 @@
+-----BEGIN CERTIFICATE-----
+MIIFlTCCA32gAwIBAgIUKCF88W+48rZzdfgYpE2dXVMGSKgwDQYJKoZIhvcNAQEL
+BQAwWjELMAkGA1UEBhMCRkkxETAPBgNVBAgMCEhlbHNpbmtpMREwDwYDVQQHDAhI
+ZWxzaW5raTEPMA0GA1UECgwGR2FsZXJhMRQwEgYDVQQDDAtnYWxlcmEucm9vdDAe
+Fw0yMTAyMDQxMzE3MDJaFw0yMzExMjUxMzE3MDJaMFoxCzAJBgNVBAYTAkZJMREw
+DwYDVQQIDAhIZWxzaW5raTERMA8GA1UEBwwISGVsc2lua2kxDzANBgNVBAoMBkdh
+bGVyYTEUMBIGA1UEAwwLZ2FsZXJhLnJvb3QwggIiMA0GCSqGSIb3DQEBAQUAA4IC
+DwAwggIKAoICAQDKqL45jbaq8RLOj+DeilPcEnBN5gn/y9V3IfZ0BQCd4bR09zLz
+7BQKz6QS825Wi56HC155W1xPMR0RYWy3I3owreQtfdGJuYoTKLpRSoqWJgy/FSzR
++Tr34WfpeIj6754YRm7MndWBPVkujPtOWz6EHn+2oUNIpCZAOwXtMrlJzf5GwNBu
+4kwkylz0whs3iTS//pZLyqk6MsLI7tebmfi9qyaM0b+C1OKiBRQRIjPON8Htp7Au
+GDyOqA4Y9IQlAzZVqy2PP79Ci2FpPF3+01ByWGY6vAIxma2VXS/aNvUvGnuzH8hz
+A5xg1+5Fv2kdxffcWLjp5/WSIaTUiBFMBRKswTtfo+vWuVpzXGvlExGHd10m+MhK
+Avoqq6N28ql6E5pDDH5k6aZ1eB6nKF6BU4BMa4SUPBX/qz8PMbb5j0+n645Gj/G2
+0DfCQoyHd6sMAZZ9LgTjGB/R6sz74YF72q6xECTCygn5HY3qjvmx0BYlIkQDKKqh
+bq2ZmsLLCwtyfUeW144eMhErNZA1MwoJxd8LM0TpJ0nXQdEESf5oS5fMLZnVrxah
+dl5QYYMbmyNedNKdwV4idhGCy+Zq7VAX4lBXazI1rD9vQb+oTcPGQiy4i/Vi/g6i
+F+XZTdTiaOWPEmvFFGLLUQxKl4w872hJaupqfteqdiZ+3ICVIUI8qnXHmwIDAQAB
+o1MwUTAdBgNVHQ4EFgQUs75v/MgjJ5RHGE6+0qdiVo4BwlowHwYDVR0jBBgwFoAU
+s75v/MgjJ5RHGE6+0qdiVo4BwlowDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0B
+AQsFAAOCAgEAOVhBs28dwwvD5q2r7oVVcxLc+tb8zu4XxpXT1p6hiZYUyPguCh00
+GVdXCgR4JMI/NcyM5fBAbF3S8oK3+9rw2kW09afVV06Qf/8o3nIyOiDl7598tGIP
+CCK4QsUW/dGajx5kvhtQ7qce+u9KfFTof6lq2xkYtFBBhmBdSv9A1jAZJMw2x3bc
+nr99PS8XZMphS0MIExHKj6Ry5DdYm722zZHyIEiiEGyMViDm2m1iug5r/LPH5Z56
+BjQiH4VP+0y5mevBOUGuH8ID+J9Hu9BeoXLhkv+W2Ljs/S6wqzjinMBqVG+wwe0Y
+a8F5pABkl5uX38nMQ7CikSbLxSbn7nRf+sux1sbzqjMldeCSqiv9mI5Ysq97+Ni1
+5qMxNxNc0u/wGRnrXH8fWfxBKPP5moA7DQfVcUWPgDGQwDpA8kn8RlJxFk3g4yaK
++NMwk5MORKyx3tz/A3Yhs9AUXk3okvmQCT2YVSHcKUB8PAU+TaKqbr3wk07Y/tL/
+jFPHS+t3eD91Y05KGUXjdtGi+33zpV0biHmTWAZT78VQowDNvEpTnXhkSx8HGHYR
+nqSMU2m2LboHSatY113RYznx0LJ1azczRlJdGs8oyPWLPDD2JCesZaQqGZVRJoms
+lK4EzYEb5mZTCRgtgoiO+iKcf6XifuOCrWZXoLm4FlLEfOQ3b8yAFlo=
+-----END CERTIFICATE-----
diff --git a/mysql-test/std_data/galera_certs/galera.root.key b/mysql-test/std_data/galera_certs/galera.root.key
new file mode 100644
index 00000000000..8472f87e714
--- /dev/null
+++ b/mysql-test/std_data/galera_certs/galera.root.key
@@ -0,0 +1,54 @@
+-----BEGIN RSA PRIVATE KEY-----
+Proc-Type: 4,ENCRYPTED
+DEK-Info: DES-EDE3-CBC,F0ACFFE47CF32BB2
+
+F5PwhFDgzuaD7ISSmSn3+GpEoWipTwOPjE8ei9LsQ1a/RdcW19kmB9sJojOSVlV6
+CbLeEl/YKkkAZvbupKvpL0i82WTi+V7W9iKNU3M9tN1JE3WNGCYrL16aUrlnXszi
+eiNhWzAGxtpXNTv+d9gdgYZ0mHJrDk390tYGRcnrEp8FasL0aI4AnIWrJq9zgHeP
+m8K0RxIB6Rp07+SEfsO0pZuGPIkO/qVhIDXYhQrEkap3viPZouH1qzaJNaQXD1ZE
+EoP0n/jbX3KuLeep4aGdEGq/eAZx/WUZpZWECOqwZ7HDNFcXj4nZRNQmCpSobEHU
+e+9DL+nnmoMtsFG7Jb6S3d8Fxv5DLqm7LTHweZzlkw0pD5sVVMSeb6Xq2Cshxsf2
+htQGZDtxscTimDrLHsNonw1tVBfY0HPpTNNmjyKpa2lmbQ4KQls3A4i9yZaXM1W4
+pkc1Emk0ot+yXZU+SNPeAgAMCf5rygiQtsALYdbkZVpVvziVVBFdav+v9RtwrMfJ
+fli6GbAwm2mKXdFlc8SnfbNxKw8v2K26IUZFEzwiE/MxwOR4UBwcNa2JcXKsMGVt
+o2pBOJr0dquywV37EV7hu2S6Kgf1nREgksON86REmZoPpzMlCpTf805awdDN/a/K
+jJkRrekRinrPaIDfvwQjIhhXYWSVQywJ2nTL6+78SvnKRNmIFDGdmOY7CtsyRvkp
+P2Qp+JhzT4VM8x2sApBQiN3S3dSr1D8gmdwKGiLp82GhMIdGNjVpxPSVjCFC3Apu
+hvRw3LSJ7iUWLrtsR2yZeDBHh1yDraLYo76qYqvgYDKYa+R1GiJd70t5cDSqgwV9
+EXlnA32VsRrAXdRypGNAq934TSWiO4l+M60bt/K/U41uQHas1gWsGdtWcjCLTylf
+YT8G+4QnIrOGJzyKZeIrawu/GtxrK2S5xe0ZAOwYQmJYiIH4TfvdrNX4DNEfGpd/
+CrBULWCvAtgqNrNgpFuV8027yAZWPpG2DSE+Q+XTahxjNUxAI7kkXB/Xxc5KfhSF
+BaYHQ2TfTWnwD3K1BNWM4yZ/L4up8UIrXyMdk7q0LofwCTKGOgqzwVaez86gpriL
+pphXPYIY9cX9qLBs5YuVGk9ApkbC0VRqmXcCL7OEAzso14ItHVlVU73QSS+I+3sK
+sqwb0USLuN7qZUVhtJIyAkK/D+DmNYSg4eqhcMRnJAanD2acmwtAy0HE+YVcQN/X
+gEwSOdkf7WkwDshvBPA8rl4YWRxwKxstS0wyPG8rzMZWYYB2xf3nd78NtdzVZSnN
+pDUJkUKorCaSuanMvjKS9r+sILzUc1bkO/T//qTY/HtRtl8AffcXIjyJkXb5PSjt
+dxq7ktkeESAB9vb+c7nWolMVqxUD+1KpHi1kwR4IuAWtJe4G+82pDc6wLEZjeJMO
+nd2NNW/CToWv0YfWonGUDZbOFqee7H0Tioqjni4uQbLcDZJHr6i3wwGIncIENo73
+DqfYEpNT3U6uLSC/cNv3lvIRjVziBEB9f0KgOHt38UIsZ4oJAMV/B63Wfn1nd1g4
+NBdNwO48M2KHbxgwXJxxX1jQOj9+IJiVaGIbpzeeJ3yTKDMfkBKH4eQj2/aAA1zK
+pqFRyoBhuviMZ1IeBOVyb3QHJSINry/WiiOKoHLvvFM7KXgygedJwg6k8SHCQCfS
+D9RignCNT/O8VhJ4hpzAXbEpeAXJn+AHYGIxL6Fu+0dwDKJjruvcLpVVt9GLLdzX
+OBgsK0z9m3hLOf8dfY97jMkRTHtokrA28iheLh6l8rFFesJM8lqFreFRRJMTXeHW
+Xwa82PbUdIWygguAlSj035aGUuNRNDZq4Bh4XTVndCDbUfRBlYF9yMduvSqaKsmm
+tLJ8V04vDECQpUcAPTCCbTDFoV8/KVDsxMTgZkdiJG/Vv2y8mzy5FPAJBNk/HCcx
+E8qc7KF80l+YGQ/IgGzzz9r9DqaXy21FwaiA6TRFth9hXMdzZLVBNfpzZ+dGMJEQ
+PAoWUshcNNwAZVU0+GzdQvdckGUcWgnMZpzXswaUmXbMidQ6VQnDTQdj5qIEnrMW
+CrIHVs+hhjcGRbGf6DGHQpQbjD9FWX38PVzPpocu1qmLKVTUoFPqm0EqU01SYLFu
+S9ntmlOqYJJIR0LbXJvAL7tVKHiK4gR8NMfN2YKPi4Eg3GKTZ5XaSpNPQJnR0ZqP
++sU07jjQTcVuCD8Cx8c9LGXa6PNAwBufk3jrz/vZy8AQEs13aMu3thHhLQWKrgi5
+jVdoLNIqQZJH7aY0YjoaD6if+4uHtRQOqUcJPUyxWnwXx+Y/o+9DrpH5K9V9fcVR
+e7Ej8j2Ha4yzZw7M9Uze0unMRQOhE2lbRsP4C6f/TK3izeSlmhG0D/pHfHx9GKbf
+S7TPnD0YhUS9TpXX3BEVSXVjIkkbIiC0djq3OI+3PSn/PJqWjCw8pL7JZp5T8J1u
+sqyQCge4XjYmmj9Np511tcviq+jmobf0b+WMmxxV21/Au+v2uI/7eAwUyWYIJYny
+kcGUDmEZohsFx6hYbRsH+bSEUqC6MuKiUaaqEb97IoR33D3ZajMBchw+Yg0jh/wJ
+S8FKEB0NlS00051UnwdsjBKyuOMWT6xH9VVR8W+7t6i0rMDxb4DjP3T+BqjXCT0d
+kiHRXRALxotk+WVRC4qRVr0kmFut9bLjlFu2Hlbnpmm7zmJcE3hbkhWjXqDsysp7
+SKJAs+IvYrTMEtURflKiN/n7y6SbXdCXvw+lRTeTjT9h8DiIMsK5vw2SSjWPaQnu
+ikCATObciGyro3aImzhaBBY0r/F4Q1KsvLi+xKo+JoDHSVNjNg9SNjQKhyVRFJq/
+quwTP019B5U3ykWj2/i7HV9IBH+nGEUuvpI5esUoIWTvdCkVdAEeSg0vwkJoohbb
+l9HjDYyEJxoLhwaR7Mqh+uDxHBK2Kqh8TkIXjdUbXqTRIX6lajzJ/p7owoE48sHa
+iWo9tN+4bOabEjPAkEhLy2cLUfWEPjClo8YZelif8cZigKzdSDbxdmyugfFtZfQX
+NYwcMYayGBCETNyByLbBjNO+7XPlNcfqQJlFWsGOrzmJdoxtW7CYYqbN8qzhpNox
+MSRK7T+eUDFKNjY53lPfUPUFgcXq+9IKicf6cYE8gsI3/5I9vzLk3Lt7ZLXMgFv7
+-----END RSA PRIVATE KEY-----
diff --git a/mysql-test/std_data/galera_certs/galera.root.srl b/mysql-test/std_data/galera_certs/galera.root.srl
new file mode 100644
index 00000000000..e6c62e06527
--- /dev/null
+++ b/mysql-test/std_data/galera_certs/galera.root.srl
@@ -0,0 +1 @@
+6D409518A9205687DA79FBF6FB2253AB3CCB1AB1
diff --git a/mysql-test/suite/binlog/disabled.def b/mysql-test/suite/binlog/disabled.def
index 424e5549541..888298bbb09 100644
--- a/mysql-test/suite/binlog/disabled.def
+++ b/mysql-test/suite/binlog/disabled.def
@@ -9,5 +9,3 @@
# Do not use any TAB characters for whitespace.
#
##############################################################################
-binlog_truncate_innodb : BUG#11764459 2010-10-20 anitha Originally disabled due to BUG#42643. Product bug fixed, but test changes needed
-binlog_spurious_ddl_errors : BUG#11761680 2013-01-18 astha Fixed on mysql-5.6 and trunk
diff --git a/mysql-test/suite/binlog/include/binlog_write_error.inc b/mysql-test/suite/binlog/include/binlog_write_error.inc
deleted file mode 100644
index fa3ba087a7e..00000000000
--- a/mysql-test/suite/binlog/include/binlog_write_error.inc
+++ /dev/null
@@ -1,108 +0,0 @@
-#
-# This include file is used by more than one test suite
-# (currently binlog and binlog_encryption).
-# Please check all dependent tests after modifying it
-#
-
-#
-# === Name ===
-#
-# binlog_write_error.test
-#
-# === Description ===
-#
-# This test case check if the error of writing binlog file is properly
-# reported and handled when executing statements.
-#
-# === Related Bugs ===
-#
-# BUG#37148
-#
-
-source include/have_log_bin.inc;
-source include/have_debug.inc;
-source include/have_binlog_format_mixed_or_statement.inc;
-
---echo #
---echo # Initialization
---echo #
-
-disable_warnings;
-DROP TABLE IF EXISTS t1, t2;
-DROP FUNCTION IF EXISTS f1;
-DROP FUNCTION IF EXISTS f2;
-DROP PROCEDURE IF EXISTS p1;
-DROP PROCEDURE IF EXISTS p2;
-DROP TRIGGER IF EXISTS tr1;
-DROP TRIGGER IF EXISTS tr2;
-DROP VIEW IF EXISTS v1, v2;
-enable_warnings;
-
---echo #
---echo # Test injecting binlog write error when executing queries
---echo #
-
-let $query= CREATE TABLE t1 (a INT);
-source include/binlog_inject_error.inc;
-
-INSERT INTO t1 VALUES (1),(2),(3);
-
-let $query= INSERT INTO t1 VALUES (4),(5),(6);
-source include/binlog_inject_error.inc;
-
-let $query= UPDATE t1 set a=a+1;
-source include/binlog_inject_error.inc;
-
-let $query= DELETE FROM t1;
-source include/binlog_inject_error.inc;
-
-let $query= CREATE TRIGGER tr1 AFTER INSERT ON t1 FOR EACH ROW INSERT INTO t1 VALUES (new.a + 100);
-source include/binlog_inject_error.inc;
-
-let $query= DROP TRIGGER tr1;
-source include/binlog_inject_error.inc;
-
-let $query= ALTER TABLE t1 ADD (b INT);
-source include/binlog_inject_error.inc;
-
-let $query= CREATE VIEW v1 AS SELECT a FROM t1;
-source include/binlog_inject_error.inc;
-
-let $query= DROP VIEW v1;
-source include/binlog_inject_error.inc;
-
-let $query= CREATE PROCEDURE p1(OUT rows_cnt INT) SELECT count(*) INTO rows_cnt FROM t1;
-source include/binlog_inject_error.inc;
-
-let $query= DROP PROCEDURE p1;
-source include/binlog_inject_error.inc;
-
-let $query= DROP TABLE t1;
-source include/binlog_inject_error.inc;
-
-let $query= CREATE FUNCTION f1() RETURNS INT return 1;
-source include/binlog_inject_error.inc;
-
-let $query= DROP FUNCTION f1;
-source include/binlog_inject_error.inc;
-
-let $query= CREATE USER user1;
-source include/binlog_inject_error.inc;
-
-let $query= REVOKE ALL PRIVILEGES, GRANT OPTION FROM user1;
-source include/binlog_inject_error.inc;
-
-let $query= DROP USER user1;
-source include/binlog_inject_error.inc;
-
---echo #
---echo # Cleanup
---echo #
-
-disable_warnings;
-DROP TABLE IF EXISTS t1, t2;
-DROP FUNCTION IF EXISTS f1;
-DROP PROCEDURE IF EXISTS p1;
-DROP TRIGGER IF EXISTS tr1;
-DROP VIEW IF EXISTS v1, v2;
-enable_warnings;
diff --git a/mysql-test/suite/binlog/include/binlog_xa_recover.inc b/mysql-test/suite/binlog/include/binlog_xa_recover.inc
deleted file mode 100644
index de2703377cc..00000000000
--- a/mysql-test/suite/binlog/include/binlog_xa_recover.inc
+++ /dev/null
@@ -1,281 +0,0 @@
-#
-# This include file is used by more than one test suite
-# (currently binlog and binlog_encryption).
-# Please check all dependent tests after modifying it
-#
-
---source include/have_innodb.inc
---source include/have_debug.inc
---source include/have_debug_sync.inc
---source include/have_binlog_format_row.inc
-# Valgrind does not work well with test that crashes the server
---source include/not_valgrind.inc
-
-# (We do not need to restore these settings, as we crash the server).
-SET GLOBAL max_binlog_size= 4096;
-SET GLOBAL innodb_flush_log_at_trx_commit= 1;
-RESET MASTER;
-
-CREATE TABLE t1 (a INT PRIMARY KEY, b MEDIUMTEXT) ENGINE=Innodb;
-# Insert some data to force a couple binlog rotations (3), so we get some
-# normal binlog checkpoints before starting the test.
-INSERT INTO t1 VALUES (100, REPEAT("x", 4100));
-# Wait for the master-bin.000002 binlog checkpoint to appear.
---let $wait_for_all= 0
---let $show_statement= SHOW BINLOG EVENTS IN "master-bin.000002"
---let $field= Info
---let $condition= = "master-bin.000002"
---source include/wait_show_condition.inc
-INSERT INTO t1 VALUES (101, REPEAT("x", 4100));
---let $wait_for_all= 0
---let $show_statement= SHOW BINLOG EVENTS IN "master-bin.000003"
---let $field= Info
---let $condition= = "master-bin.000003"
---source include/wait_show_condition.inc
-INSERT INTO t1 VALUES (102, REPEAT("x", 4100));
---let $wait_for_all= 0
---let $show_statement= SHOW BINLOG EVENTS IN "master-bin.000004"
---let $field= Info
---let $condition= = "master-bin.000004"
---source include/wait_show_condition.inc
-
-# Now start a bunch of transactions that span multiple binlog
-# files. Leave then in the state prepared-but-not-committed in the engine
-# and crash the server. Check that crash recovery is able to recover all
-# of them.
-#
-# We use debug_sync to get all the transactions into the prepared state before
-# we commit any of them. This is because the prepare step flushes the InnoDB
-# redo log - including any commits made before, so recovery would become
-# unnecessary, decreasing the value of this test.
-#
-# We arrange to have con1 with a prepared transaction in master-bin.000004,
-# con2 and con3 with a prepared transaction in master-bin.000005, and a new
-# empty master-bin.000006. So the latest binlog checkpoint should be
-# master-bin.000006.
-
-connect(con1,localhost,root,,);
-# First wait after prepare and before write to binlog.
-SET DEBUG_SYNC= "ha_commit_trans_before_log_and_order SIGNAL con1_wait WAIT_FOR con1_cont";
-# Then complete InnoDB commit in memory (but not commit checkpoint / write to
-# disk), and hang until crash, leaving a transaction to be XA recovered.
-SET DEBUG_SYNC= "commit_after_group_release_commit_ordered SIGNAL con1_ready WAIT_FOR _ever";
-send INSERT INTO t1 VALUES (1, REPEAT("x", 4100));
-
-connection default;
-SET DEBUG_SYNC= "now WAIT_FOR con1_wait";
-
-connect(con2,localhost,root,,);
-SET DEBUG_SYNC= "ha_commit_trans_before_log_and_order SIGNAL con2_wait WAIT_FOR con2_cont";
-SET DEBUG_SYNC= "commit_after_group_release_commit_ordered SIGNAL con2_ready WAIT_FOR _ever";
-send INSERT INTO t1 VALUES (2, NULL);
-
-connection default;
-SET DEBUG_SYNC= "now WAIT_FOR con2_wait";
-
-connect(con3,localhost,root,,);
-SET DEBUG_SYNC= "ha_commit_trans_before_log_and_order SIGNAL con3_wait WAIT_FOR con3_cont";
-SET DEBUG_SYNC= "commit_after_group_release_commit_ordered SIGNAL con3_ready WAIT_FOR _ever";
-send INSERT INTO t1 VALUES (3, REPEAT("x", 4100));
-
-connection default;
-SET DEBUG_SYNC= "now WAIT_FOR con3_wait";
-
-connect(con4,localhost,root,,);
-SET DEBUG_SYNC= "ha_commit_trans_before_log_and_order SIGNAL con4_wait WAIT_FOR con4_cont";
-SET SESSION debug_dbug="+d,crash_commit_after_log";
-send INSERT INTO t1 VALUES (4, NULL);
-
-connection default;
-SET DEBUG_SYNC= "now WAIT_FOR con4_wait";
-
-SET DEBUG_SYNC= "now SIGNAL con1_cont";
-SET DEBUG_SYNC= "now WAIT_FOR con1_ready";
-SET DEBUG_SYNC= "now SIGNAL con2_cont";
-SET DEBUG_SYNC= "now WAIT_FOR con2_ready";
-SET DEBUG_SYNC= "now SIGNAL con3_cont";
-SET DEBUG_SYNC= "now WAIT_FOR con3_ready";
-
-# Check that everything is committed in binary log.
---source include/show_binary_logs.inc
---let $binlog_file= master-bin.000003
---let $binlog_start= 4
---source include/show_binlog_events.inc
---let $binlog_file= master-bin.000004
---source include/show_binlog_events.inc
---let $binlog_file= master-bin.000005
---source include/show_binlog_events.inc
---let $binlog_file= master-bin.000006
---source include/show_binlog_events.inc
-
-
-# Check that server will not purge too much.
-PURGE BINARY LOGS TO "master-bin.000006";
---source include/show_binary_logs.inc
-
-# Now crash the server with one more transaction in prepared state.
---write_file $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
-wait-binlog_xa_recover.test
-EOF
---error 0,2006,2013
-SET DEBUG_SYNC= "now SIGNAL con4_cont";
-connection con4;
---error 2006,2013
-reap;
-
---append_file $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
-restart-group_commit_binlog_pos.test
-EOF
-
-connection default;
---enable_reconnect
---source include/wait_until_connected_again.inc
-
-# Check that all transactions are recovered.
-SELECT a FROM t1 ORDER BY a;
-
---echo Test that with multiple binlog checkpoints, recovery starts from the last one.
-SET GLOBAL max_binlog_size= 4096;
-SET GLOBAL innodb_flush_log_at_trx_commit= 1;
-RESET MASTER;
-
-# Rotate to binlog master-bin.000003 while delaying binlog checkpoints.
-# So we get multiple binlog checkpoints in master-bin.000003.
-# Then complete the checkpoints, crash, and check that we only scan
-# the necessary binlog file (ie. that we use the _last_ checkpoint).
-
-connect(con10,localhost,root,,);
-SET DEBUG_SYNC= "commit_after_group_release_commit_ordered SIGNAL con10_ready WAIT_FOR con10_cont";
-send INSERT INTO t1 VALUES (10, REPEAT("x", 4100));
-
-connection default;
-SET DEBUG_SYNC= "now WAIT_FOR con10_ready";
-
-connect(con11,localhost,root,,);
-SET DEBUG_SYNC= "commit_after_group_release_commit_ordered SIGNAL con11_ready WAIT_FOR con11_cont";
-send INSERT INTO t1 VALUES (11, REPEAT("x", 4100));
-
-connection default;
-SET DEBUG_SYNC= "now WAIT_FOR con11_ready";
-
-connect(con12,localhost,root,,);
-SET DEBUG_SYNC= "commit_after_group_release_commit_ordered SIGNAL con12_ready WAIT_FOR con12_cont";
-send INSERT INTO t1 VALUES (12, REPEAT("x", 4100));
-
-connection default;
-SET DEBUG_SYNC= "now WAIT_FOR con12_ready";
-INSERT INTO t1 VALUES (13, NULL);
-
---source include/show_binary_logs.inc
---let $binlog_file= master-bin.000004
---let $binlog_start= 4
---source include/show_binlog_events.inc
-
-SET DEBUG_SYNC= "now SIGNAL con10_cont";
-connection con10;
-reap;
-connection default;
-
-# We need to sync the test case with the background processing of the
-# commit checkpoint, otherwise we get nondeterministic results.
-SET @old_dbug= @@global.DEBUG_DBUG;
-SET GLOBAL debug_dbug="+d,binlog_background_checkpoint_processed";
-
-SET DEBUG_SYNC= "now SIGNAL con12_cont";
-connection con12;
-reap;
-connection default;
-SET DEBUG_SYNC= "now WAIT_FOR binlog_background_checkpoint_processed";
-SET GLOBAL debug_dbug= @old_dbug;
-
-SET DEBUG_SYNC= "now SIGNAL con11_cont";
-connection con11;
-reap;
-
-connection default;
-# Wait for the last (master-bin.000004) binlog checkpoint to appear.
---let $wait_for_all= 0
---let $show_statement= SHOW BINLOG EVENTS IN "master-bin.000004"
---let $field= Info
---let $condition= = "master-bin.000004"
---source include/wait_show_condition.inc
-
---echo Checking that master-bin.000004 is the last binlog checkpoint
---source include/show_binlog_events.inc
-
---echo Now crash the server
-# It is not too easy to test XA recovery, as it runs early during server
-# startup, before any connections can be made.
-# What we do is set a DBUG error insert which will crash if XA recovery
-# starts from any other binlog than master-bin.000004 (check the file
-# binlog_xa_recover-master.opt). Then we will fail here if XA recovery
-# would start from the wrong place.
---write_file $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
-wait-binlog_xa_recover.test
-EOF
-SET SESSION debug_dbug="+d,crash_commit_after_log";
---error 2006,2013
-INSERT INTO t1 VALUES (14, NULL);
-
---append_file $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
-restart-group_commit_binlog_pos.test
-EOF
-
-connection default;
---enable_reconnect
---source include/wait_until_connected_again.inc
-
-# Check that all transactions are recovered.
-SELECT a FROM t1 ORDER BY a;
-
-
---echo *** Check that recovery works if we crashed early during rotate, before
---echo *** binlog checkpoint event could be written.
-
-SET GLOBAL max_binlog_size= 4096;
-SET GLOBAL innodb_flush_log_at_trx_commit= 1;
-RESET MASTER;
-
-# We need some initial data to reach binlog master-bin.000004. Otherwise
-# crash recovery fails due to the error insert used for previous test.
-INSERT INTO t1 VALUES (21, REPEAT("x", 4100));
-INSERT INTO t1 VALUES (22, REPEAT("x", 4100));
-# Wait for the master-bin.000003 binlog checkpoint to appear.
---let $wait_for_all= 0
---let $show_statement= SHOW BINLOG EVENTS IN "master-bin.000003"
---let $field= Info
---let $condition= = "master-bin.000003"
---source include/wait_show_condition.inc
-INSERT INTO t1 VALUES (23, REPEAT("x", 4100));
-# Wait for the last (master-bin.000004) binlog checkpoint to appear.
---let $wait_for_all= 0
---let $show_statement= SHOW BINLOG EVENTS IN "master-bin.000004"
---let $field= Info
---let $condition= = "master-bin.000004"
---source include/wait_show_condition.inc
-
---write_file $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
-wait-binlog_xa_recover.test
-EOF
-SET SESSION debug_dbug="+d,crash_before_write_checkpoint_event";
---error 2006,2013
-INSERT INTO t1 VALUES (24, REPEAT("x", 4100));
-
---append_file $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
-restart-group_commit_binlog_pos.test
-EOF
-
---enable_reconnect
---source include/wait_until_connected_again.inc
-
-# Check that all transactions are recovered.
-SELECT a FROM t1 ORDER BY a;
-
---source include/show_binary_logs.inc
---let $binlog_file= master-bin.000004
---let $binlog_start= 4
---source include/show_binlog_events.inc
-
-# Cleanup
-connection default;
-DROP TABLE t1;
diff --git a/mysql-test/suite/binlog/r/binlog_checkpoint_flush_logs.result b/mysql-test/suite/binlog/r/binlog_checkpoint_flush_logs.result
new file mode 100644
index 00000000000..e861f36394c
--- /dev/null
+++ b/mysql-test/suite/binlog/r/binlog_checkpoint_flush_logs.result
@@ -0,0 +1,52 @@
+SET GLOBAL innodb_flush_log_at_trx_commit= 1;
+RESET MASTER;
+CREATE TABLE t1 (a INT PRIMARY KEY, b MEDIUMTEXT) ENGINE=Innodb;
+*** Test that FLUSH LOGS waits if a transaction ordered commit is in progress.
+connect con1,localhost,root,,;
+SET DEBUG_SYNC= "commit_before_get_LOCK_commit_ordered SIGNAL con1_ready WAIT_FOR con1_go";
+INSERT INTO t1 VALUES (1, REPEAT("x", 1));
+connection default;
+SET DEBUG_SYNC= "now WAIT_FOR con1_ready";
+SET DEBUG_SYNC= "rotate_after_rotate SIGNAL con_flush_ready WAIT_FOR default_go";
+FLUSH LOGS;
+connect con2,localhost,root,,;
+Trx_1 is not yet committed:
+SELECT count(*) as 'ZERO' from t1;
+ZERO
+0
+Wait until Trx_2 has rotated the binlog:
+SET DEBUG_SYNC= "now WAIT_FOR con_flush_ready";
+SET DEBUG_SYNC= "now SIGNAL default_go";
+# restart
+connection default;
+Must be three logs in the list:
+show binary logs;
+Log_name File_size
+master-bin.000001 #
+master-bin.000002 #
+master-bin.000003 #
+include/show_binlog_events.inc
+Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000001 # Format_desc # # SERVER_VERSION, BINLOG_VERSION
+master-bin.000001 # Gtid_list # # []
+master-bin.000001 # Binlog_checkpoint # # master-bin.000001
+master-bin.000001 # Gtid # # GTID #-#-#
+master-bin.000001 # Query # # use `test`; CREATE TABLE t1 (a INT PRIMARY KEY, b MEDIUMTEXT) ENGINE=Innodb
+master-bin.000001 # Gtid # # BEGIN GTID #-#-#
+master-bin.000001 # Annotate_rows # # INSERT INTO t1 VALUES (1, REPEAT("x", 1))
+master-bin.000001 # Table_map # # table_id: # (test.t1)
+master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F
+master-bin.000001 # Xid # # COMMIT /* XID */
+master-bin.000001 # Rotate # # master-bin.000002;pos=POS
+Only one Binlog checkpoint must exist and point to master-bin.000001
+include/show_binlog_events.inc
+Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000002 # Format_desc # # SERVER_VERSION, BINLOG_VERSION
+master-bin.000002 # Gtid_list # # [#-#-#]
+master-bin.000002 # Binlog_checkpoint # # master-bin.000001
+SELECT count(*) as 'ONE' from t1;
+ONE
+1
+connection default;
+DROP TABLE t1;
+SET debug_sync = 'reset';
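
The new binlog_checkpoint_flush_logs.result above is only the expected output; the .test file that drives it is not part of this excerpt. The sketch below reconstructs the visible first phase from the echoed statements (the DEBUG_SYNC points, table and connection names come straight from the result); the prerequisite includes, the use of send, and the elided restart/binlog-verification steps are assumptions and may differ from the real test.

    # Sketch reconstructed from the result file above -- not the real test.
    --source include/have_innodb.inc
    --source include/have_debug_sync.inc
    --source include/have_binlog_format_row.inc

    SET GLOBAL innodb_flush_log_at_trx_commit= 1;
    RESET MASTER;
    CREATE TABLE t1 (a INT PRIMARY KEY, b MEDIUMTEXT) ENGINE=Innodb;

    --echo *** Test that FLUSH LOGS waits if a transaction ordered commit is in progress.
    connect (con1,localhost,root,,);
    # Block the INSERT just before it takes the commit-order lock.
    SET DEBUG_SYNC= "commit_before_get_LOCK_commit_ordered SIGNAL con1_ready WAIT_FOR con1_go";
    send INSERT INTO t1 VALUES (1, REPEAT("x", 1));

    connection default;
    SET DEBUG_SYNC= "now WAIT_FOR con1_ready";
    # Make the rotation triggered by FLUSH LOGS announce itself and then wait.
    SET DEBUG_SYNC= "rotate_after_rotate SIGNAL con_flush_ready WAIT_FOR default_go";
    send FLUSH LOGS;

    connect (con2,localhost,root,,);
    --echo Trx_1 is not yet committed:
    SELECT count(*) as 'ZERO' from t1;
    --echo Wait until Trx_2 has rotated the binlog:
    SET DEBUG_SYNC= "now WAIT_FOR con_flush_ready";
    SET DEBUG_SYNC= "now SIGNAL default_go";
    # ... crash/restart and the SHOW BINARY LOGS / show_binlog_events.inc
    # checks listed in the result file follow here.
    DROP TABLE t1;
    SET debug_sync = 'reset';
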
diff --git a/mysql-test/suite/binlog/r/binlog_spurious_ddl_errors.result b/mysql-test/suite/binlog/r/binlog_spurious_ddl_errors.result
index 1a81eee1a58..798bd8ab853 100644
--- a/mysql-test/suite/binlog/r/binlog_spurious_ddl_errors.result
+++ b/mysql-test/suite/binlog/r/binlog_spurious_ddl_errors.result
@@ -1,5 +1,5 @@
SET @old_binlog_format= @@global.binlog_format;
-INSTALL PLUGIN example SONAME 'ha_example.so';
+INSTALL PLUGIN example SONAME 'ha_example';
################################################################################
# Verifies if ER_BINLOG_STMT_MODE_AND_ROW_ENGINE happens by setting the binlog
# format to STATEMENT and the transaction isolation level to READ COMMITTED as
@@ -18,7 +18,7 @@ ALTER TABLE t_row ADD COLUMN b INT;
CREATE TRIGGER trig_row BEFORE INSERT ON t_row FOR EACH ROW INSERT INTO t_stmt VALUES (1);
CREATE INDEX i ON t_row(a);
CREATE TABLE t_row_new ENGINE = InnoDB SELECT * FROM t_row;
-ERROR HY000: Cannot execute statement: impossible to write to binary log since BINLOG_FORMAT = STATEMENT and at least one table uses a storage engine limited to row-based logging. InnoDB is limited to row-logging when transaction isolation level is READ COMMITTED or READ UNCOMMITTED.
+ERROR HY000: Cannot execute statement: impossible to write to binary log since BINLOG_FORMAT = STATEMENT and at least one table uses a storage engine limited to row-based logging.
DROP TABLE t_row;
@@ -36,12 +36,11 @@ DROP TABLE t_row;
SET binlog_format = ROW;
CREATE TABLE t_stmt (a VARCHAR(100)) ENGINE = EXAMPLE;
ALTER TABLE t_stmt ADD COLUMN b INT;
-ERROR 42000: This version of MySQL doesn't yet support 'ALTER TABLE'
CREATE TRIGGER trig_stmt BEFORE INSERT ON t_stmt FOR EACH ROW INSERT INTO t_stmt VALUES (1);
CREATE INDEX i ON t_stmt(a);
ERROR 42000: Too many key parts specified; max 0 parts allowed
CREATE TABLE t_stmt_new ENGINE = EXAMPLE SELECT * FROM t_stmt;
-ERROR HY000: Cannot execute statement: impossible to write to binary log since BINLOG_FORMAT = ROW and at least one table uses a storage engine limited to statement-based logging.
+ERROR HY000: Cannot execute statement: impossible to write to binary log since BINLOG_FORMAT = ROW and at least one table uses a storage engine limited to statement-based logging
DROP TABLE t_stmt;
diff --git a/mysql-test/suite/binlog/r/binlog_statement_insert_delayed.result b/mysql-test/suite/binlog/r/binlog_statement_insert_delayed.result
index d6875ab60e0..a4cd5b4080d 100644
--- a/mysql-test/suite/binlog/r/binlog_statement_insert_delayed.result
+++ b/mysql-test/suite/binlog/r/binlog_statement_insert_delayed.result
@@ -50,3 +50,4 @@ a
400
401
drop table t1;
+reset master;
diff --git a/mysql-test/suite/binlog/r/binlog_truncate_innodb.result b/mysql-test/suite/binlog/r/binlog_truncate_innodb.result
index 8beeeb1a428..87ce8e30dee 100644
--- a/mysql-test/suite/binlog/r/binlog_truncate_innodb.result
+++ b/mysql-test/suite/binlog/r/binlog_truncate_innodb.result
@@ -7,9 +7,11 @@ INSERT INTO t2 VALUES (1),(2),(3);
**** Truncate of empty table shall be logged
TRUNCATE TABLE t1;
TRUNCATE TABLE t2;
-show binlog events from <binlog_start>;
+include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000001 # Gtid # # GTID #-#-#
master-bin.000001 # Query # # use `test`; TRUNCATE TABLE t1
+master-bin.000001 # Gtid # # GTID #-#-#
master-bin.000001 # Query # # use `test`; TRUNCATE TABLE t2
DROP TABLE t1,t2;
#
@@ -18,18 +20,17 @@ DROP TABLE t1,t2;
CREATE TABLE t1 (a INT) ENGINE=InnoDB;
CREATE TABLE t2 (a INT) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1),(2);
-# Connection: default
BEGIN;
INSERT INTO t2 SELECT * FROM t1;
-# Connection: truncate
+connect truncate,localhost,root,,;
TRUNCATE TABLE t1;
-# Connection: default
+connection default;
INSERT INTO t2 SELECT * FROM t1;
SELECT COUNT(*) FROM t2;
COUNT(*)
4
COMMIT;
-# Connection: truncate
+connection truncate;
# Reaping TRUNCATE TABLE
SELECT COUNT(*) FROM t1;
COUNT(*)
@@ -37,16 +38,20 @@ COUNT(*)
SELECT COUNT(*) FROM t2;
COUNT(*)
4
-# Connection: default
-show binlog events from <binlog_start>;
+connection default;
+include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
-master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Gtid # # BEGIN GTID #-#-#
+master-bin.000001 # Annotate_rows # # INSERT INTO t2 SELECT * FROM t1
master-bin.000001 # Table_map # # table_id: # (test.t2)
-master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
+master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F
+master-bin.000001 # Annotate_rows # # INSERT INTO t2 SELECT * FROM t1
master-bin.000001 # Table_map # # table_id: # (test.t2)
-master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
+master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F
master-bin.000001 # Xid # # COMMIT /* XID */
+master-bin.000001 # Gtid # # GTID #-#-#
master-bin.000001 # Query # # use `test`; TRUNCATE TABLE t1
+disconnect truncate;
DROP TABLE t1,t2;
# Even though the isolation level might be permissive, truncate
# table follows a stricter isolation as its locking is based on
@@ -59,9 +64,11 @@ SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
TRUNCATE TABLE t1;
SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
TRUNCATE TABLE t2;
-show binlog events from <binlog_start>;
+include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000001 # Gtid # # GTID #-#-#
master-bin.000001 # Query # # use `test`; TRUNCATE TABLE t1
+master-bin.000001 # Gtid # # GTID #-#-#
master-bin.000001 # Query # # use `test`; TRUNCATE TABLE t2
DROP TABLE t1,t2;
#
@@ -71,18 +78,17 @@ CREATE TABLE t1 (a INT) ENGINE=InnoDB;
CREATE TABLE t2 (a INT) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1),(2);
SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
-# Connection: default
BEGIN;
INSERT INTO t2 SELECT * FROM t1;
-# Connection: truncate
+connect truncate,localhost,root,,;
TRUNCATE TABLE t1;
-# Connection: default
+connection default;
INSERT INTO t2 SELECT * FROM t1;
SELECT COUNT(*) FROM t2;
COUNT(*)
4
COMMIT;
-# Connection: truncate
+connection truncate;
# Reaping TRUNCATE TABLE
SELECT COUNT(*) FROM t1;
COUNT(*)
@@ -90,16 +96,20 @@ COUNT(*)
SELECT COUNT(*) FROM t2;
COUNT(*)
4
-# Connection: default
-show binlog events from <binlog_start>;
+connection default;
+include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
-master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Gtid # # BEGIN GTID #-#-#
+master-bin.000001 # Annotate_rows # # INSERT INTO t2 SELECT * FROM t1
master-bin.000001 # Table_map # # table_id: # (test.t2)
-master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
+master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F
+master-bin.000001 # Annotate_rows # # INSERT INTO t2 SELECT * FROM t1
master-bin.000001 # Table_map # # table_id: # (test.t2)
-master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
+master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F
master-bin.000001 # Xid # # COMMIT /* XID */
+master-bin.000001 # Gtid # # GTID #-#-#
master-bin.000001 # Query # # use `test`; TRUNCATE TABLE t1
+disconnect truncate;
DROP TABLE t1,t2;
CREATE TABLE t1 (a INT) ENGINE=InnoDB;
CREATE TABLE t2 (a INT) ENGINE=InnoDB;
@@ -109,9 +119,11 @@ SET TRANSACTION ISOLATION LEVEL READ COMMITTED;
TRUNCATE TABLE t1;
SET TRANSACTION ISOLATION LEVEL READ COMMITTED;
TRUNCATE TABLE t2;
-show binlog events from <binlog_start>;
+include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000001 # Gtid # # GTID #-#-#
master-bin.000001 # Query # # use `test`; TRUNCATE TABLE t1
+master-bin.000001 # Gtid # # GTID #-#-#
master-bin.000001 # Query # # use `test`; TRUNCATE TABLE t2
DROP TABLE t1,t2;
#
@@ -121,18 +133,17 @@ CREATE TABLE t1 (a INT) ENGINE=InnoDB;
CREATE TABLE t2 (a INT) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1),(2);
SET TRANSACTION ISOLATION LEVEL READ COMMITTED;
-# Connection: default
BEGIN;
INSERT INTO t2 SELECT * FROM t1;
-# Connection: truncate
+connect truncate,localhost,root,,;
TRUNCATE TABLE t1;
-# Connection: default
+connection default;
INSERT INTO t2 SELECT * FROM t1;
SELECT COUNT(*) FROM t2;
COUNT(*)
4
COMMIT;
-# Connection: truncate
+connection truncate;
# Reaping TRUNCATE TABLE
SELECT COUNT(*) FROM t1;
COUNT(*)
@@ -140,16 +151,20 @@ COUNT(*)
SELECT COUNT(*) FROM t2;
COUNT(*)
4
-# Connection: default
-show binlog events from <binlog_start>;
+connection default;
+include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
-master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Gtid # # BEGIN GTID #-#-#
+master-bin.000001 # Annotate_rows # # INSERT INTO t2 SELECT * FROM t1
master-bin.000001 # Table_map # # table_id: # (test.t2)
-master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
+master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F
+master-bin.000001 # Annotate_rows # # INSERT INTO t2 SELECT * FROM t1
master-bin.000001 # Table_map # # table_id: # (test.t2)
-master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
+master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F
master-bin.000001 # Xid # # COMMIT /* XID */
+master-bin.000001 # Gtid # # GTID #-#-#
master-bin.000001 # Query # # use `test`; TRUNCATE TABLE t1
+disconnect truncate;
DROP TABLE t1,t2;
CREATE TABLE t1 (a INT) ENGINE=InnoDB;
CREATE TABLE t2 (a INT) ENGINE=InnoDB;
@@ -159,9 +174,11 @@ SET TRANSACTION ISOLATION LEVEL REPEATABLE READ;
TRUNCATE TABLE t1;
SET TRANSACTION ISOLATION LEVEL REPEATABLE READ;
TRUNCATE TABLE t2;
-show binlog events from <binlog_start>;
+include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000001 # Gtid # # GTID #-#-#
master-bin.000001 # Query # # use `test`; TRUNCATE TABLE t1
+master-bin.000001 # Gtid # # GTID #-#-#
master-bin.000001 # Query # # use `test`; TRUNCATE TABLE t2
DROP TABLE t1,t2;
#
@@ -171,18 +188,17 @@ CREATE TABLE t1 (a INT) ENGINE=InnoDB;
CREATE TABLE t2 (a INT) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1),(2);
SET TRANSACTION ISOLATION LEVEL REPEATABLE READ;
-# Connection: default
BEGIN;
INSERT INTO t2 SELECT * FROM t1;
-# Connection: truncate
+connect truncate,localhost,root,,;
TRUNCATE TABLE t1;
-# Connection: default
+connection default;
INSERT INTO t2 SELECT * FROM t1;
SELECT COUNT(*) FROM t2;
COUNT(*)
4
COMMIT;
-# Connection: truncate
+connection truncate;
# Reaping TRUNCATE TABLE
SELECT COUNT(*) FROM t1;
COUNT(*)
@@ -190,16 +206,20 @@ COUNT(*)
SELECT COUNT(*) FROM t2;
COUNT(*)
4
-# Connection: default
-show binlog events from <binlog_start>;
+connection default;
+include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
-master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Gtid # # BEGIN GTID #-#-#
+master-bin.000001 # Annotate_rows # # INSERT INTO t2 SELECT * FROM t1
master-bin.000001 # Table_map # # table_id: # (test.t2)
-master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
+master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F
+master-bin.000001 # Annotate_rows # # INSERT INTO t2 SELECT * FROM t1
master-bin.000001 # Table_map # # table_id: # (test.t2)
-master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
+master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F
master-bin.000001 # Xid # # COMMIT /* XID */
+master-bin.000001 # Gtid # # GTID #-#-#
master-bin.000001 # Query # # use `test`; TRUNCATE TABLE t1
+disconnect truncate;
DROP TABLE t1,t2;
CREATE TABLE t1 (a INT) ENGINE=InnoDB;
CREATE TABLE t2 (a INT) ENGINE=InnoDB;
@@ -209,9 +229,11 @@ SET TRANSACTION ISOLATION LEVEL SERIALIZABLE;
TRUNCATE TABLE t1;
SET TRANSACTION ISOLATION LEVEL SERIALIZABLE;
TRUNCATE TABLE t2;
-show binlog events from <binlog_start>;
+include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000001 # Gtid # # GTID #-#-#
master-bin.000001 # Query # # use `test`; TRUNCATE TABLE t1
+master-bin.000001 # Gtid # # GTID #-#-#
master-bin.000001 # Query # # use `test`; TRUNCATE TABLE t2
DROP TABLE t1,t2;
#
@@ -221,18 +243,17 @@ CREATE TABLE t1 (a INT) ENGINE=InnoDB;
CREATE TABLE t2 (a INT) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1),(2);
SET TRANSACTION ISOLATION LEVEL SERIALIZABLE;
-# Connection: default
BEGIN;
INSERT INTO t2 SELECT * FROM t1;
-# Connection: truncate
+connect truncate,localhost,root,,;
TRUNCATE TABLE t1;
-# Connection: default
+connection default;
INSERT INTO t2 SELECT * FROM t1;
SELECT COUNT(*) FROM t2;
COUNT(*)
4
COMMIT;
-# Connection: truncate
+connection truncate;
# Reaping TRUNCATE TABLE
SELECT COUNT(*) FROM t1;
COUNT(*)
@@ -240,16 +261,20 @@ COUNT(*)
SELECT COUNT(*) FROM t2;
COUNT(*)
4
-# Connection: default
-show binlog events from <binlog_start>;
+connection default;
+include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
-master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Gtid # # BEGIN GTID #-#-#
+master-bin.000001 # Annotate_rows # # INSERT INTO t2 SELECT * FROM t1
master-bin.000001 # Table_map # # table_id: # (test.t2)
-master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
+master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F
+master-bin.000001 # Annotate_rows # # INSERT INTO t2 SELECT * FROM t1
master-bin.000001 # Table_map # # table_id: # (test.t2)
-master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
+master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F
master-bin.000001 # Xid # # COMMIT /* XID */
+master-bin.000001 # Gtid # # GTID #-#-#
master-bin.000001 # Query # # use `test`; TRUNCATE TABLE t1
+disconnect truncate;
DROP TABLE t1,t2;
SET BINLOG_FORMAT=STATEMENT;
RESET MASTER;
@@ -261,9 +286,11 @@ SET TRANSACTION ISOLATION LEVEL SERIALIZABLE;
TRUNCATE TABLE t1;
SET TRANSACTION ISOLATION LEVEL SERIALIZABLE;
TRUNCATE TABLE t2;
-show binlog events from <binlog_start>;
+include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000001 # Gtid # # GTID #-#-#
master-bin.000001 # Query # # use `test`; TRUNCATE TABLE t1
+master-bin.000001 # Gtid # # GTID #-#-#
master-bin.000001 # Query # # use `test`; TRUNCATE TABLE t2
DROP TABLE t1,t2;
#
@@ -273,18 +300,17 @@ CREATE TABLE t1 (a INT) ENGINE=InnoDB;
CREATE TABLE t2 (a INT) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1),(2);
SET TRANSACTION ISOLATION LEVEL SERIALIZABLE;
-# Connection: default
BEGIN;
INSERT INTO t2 SELECT * FROM t1;
-# Connection: truncate
+connect truncate,localhost,root,,;
TRUNCATE TABLE t1;
-# Connection: default
+connection default;
INSERT INTO t2 SELECT * FROM t1;
SELECT COUNT(*) FROM t2;
COUNT(*)
4
COMMIT;
-# Connection: truncate
+connection truncate;
# Reaping TRUNCATE TABLE
SELECT COUNT(*) FROM t1;
COUNT(*)
@@ -292,14 +318,16 @@ COUNT(*)
SELECT COUNT(*) FROM t2;
COUNT(*)
4
-# Connection: default
-show binlog events from <binlog_start>;
+connection default;
+include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
-master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Gtid # # BEGIN GTID #-#-#
master-bin.000001 # Query # # use `test`; INSERT INTO t2 SELECT * FROM t1
master-bin.000001 # Query # # use `test`; INSERT INTO t2 SELECT * FROM t1
master-bin.000001 # Xid # # COMMIT /* XID */
+master-bin.000001 # Gtid # # GTID #-#-#
master-bin.000001 # Query # # use `test`; TRUNCATE TABLE t1
+disconnect truncate;
DROP TABLE t1,t2;
# Truncate is not supported for SBR if the isolation level is
# READ UNCOMMITTED or READ COMMITTED. These specific isolation
@@ -312,9 +340,11 @@ SET TRANSACTION ISOLATION LEVEL REPEATABLE READ;
TRUNCATE TABLE t1;
SET TRANSACTION ISOLATION LEVEL REPEATABLE READ;
TRUNCATE TABLE t2;
-show binlog events from <binlog_start>;
+include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000001 # Gtid # # GTID #-#-#
master-bin.000001 # Query # # use `test`; TRUNCATE TABLE t1
+master-bin.000001 # Gtid # # GTID #-#-#
master-bin.000001 # Query # # use `test`; TRUNCATE TABLE t2
DROP TABLE t1,t2;
#
@@ -324,18 +354,17 @@ CREATE TABLE t1 (a INT) ENGINE=InnoDB;
CREATE TABLE t2 (a INT) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1),(2);
SET TRANSACTION ISOLATION LEVEL REPEATABLE READ;
-# Connection: default
BEGIN;
INSERT INTO t2 SELECT * FROM t1;
-# Connection: truncate
+connect truncate,localhost,root,,;
TRUNCATE TABLE t1;
-# Connection: default
+connection default;
INSERT INTO t2 SELECT * FROM t1;
SELECT COUNT(*) FROM t2;
COUNT(*)
4
COMMIT;
-# Connection: truncate
+connection truncate;
# Reaping TRUNCATE TABLE
SELECT COUNT(*) FROM t1;
COUNT(*)
@@ -343,14 +372,16 @@ COUNT(*)
SELECT COUNT(*) FROM t2;
COUNT(*)
4
-# Connection: default
-show binlog events from <binlog_start>;
+connection default;
+include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
-master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Gtid # # BEGIN GTID #-#-#
master-bin.000001 # Query # # use `test`; INSERT INTO t2 SELECT * FROM t1
master-bin.000001 # Query # # use `test`; INSERT INTO t2 SELECT * FROM t1
master-bin.000001 # Xid # # COMMIT /* XID */
+master-bin.000001 # Gtid # # GTID #-#-#
master-bin.000001 # Query # # use `test`; TRUNCATE TABLE t1
+disconnect truncate;
DROP TABLE t1,t2;
CREATE TABLE t1 (a INT) ENGINE=InnoDB;
CREATE TABLE t2 (a INT) ENGINE=InnoDB;
@@ -360,9 +391,11 @@ SET TRANSACTION ISOLATION LEVEL SERIALIZABLE;
TRUNCATE TABLE t1;
SET TRANSACTION ISOLATION LEVEL SERIALIZABLE;
TRUNCATE TABLE t2;
-show binlog events from <binlog_start>;
+include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000001 # Gtid # # GTID #-#-#
master-bin.000001 # Query # # use `test`; TRUNCATE TABLE t1
+master-bin.000001 # Gtid # # GTID #-#-#
master-bin.000001 # Query # # use `test`; TRUNCATE TABLE t2
DROP TABLE t1,t2;
#
@@ -372,18 +405,17 @@ CREATE TABLE t1 (a INT) ENGINE=InnoDB;
CREATE TABLE t2 (a INT) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1),(2);
SET TRANSACTION ISOLATION LEVEL SERIALIZABLE;
-# Connection: default
BEGIN;
INSERT INTO t2 SELECT * FROM t1;
-# Connection: truncate
+connect truncate,localhost,root,,;
TRUNCATE TABLE t1;
-# Connection: default
+connection default;
INSERT INTO t2 SELECT * FROM t1;
SELECT COUNT(*) FROM t2;
COUNT(*)
4
COMMIT;
-# Connection: truncate
+connection truncate;
# Reaping TRUNCATE TABLE
SELECT COUNT(*) FROM t1;
COUNT(*)
@@ -391,13 +423,16 @@ COUNT(*)
SELECT COUNT(*) FROM t2;
COUNT(*)
4
-# Connection: default
-show binlog events from <binlog_start>;
+connection default;
+include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
-master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Gtid # # BEGIN GTID #-#-#
master-bin.000001 # Query # # use `test`; INSERT INTO t2 SELECT * FROM t1
master-bin.000001 # Query # # use `test`; INSERT INTO t2 SELECT * FROM t1
master-bin.000001 # Xid # # COMMIT /* XID */
+master-bin.000001 # Gtid # # GTID #-#-#
master-bin.000001 # Query # # use `test`; TRUNCATE TABLE t1
+disconnect truncate;
DROP TABLE t1,t2;
-SET BINLOG_FORMAT=@old_binlog_format;
+SET @@global.binlog_format = @old_binlog_format;
+SET @@session.binlog_format = @old_binlog_format;
diff --git a/mysql-test/suite/binlog/r/binlog_write_error.result b/mysql-test/suite/binlog/r/binlog_write_error.result
index 6e8a212035a..2ee68465243 100644
--- a/mysql-test/suite/binlog/r/binlog_write_error.result
+++ b/mysql-test/suite/binlog/r/binlog_write_error.result
@@ -1,15 +1,4 @@
#
-# Initialization
-#
-DROP TABLE IF EXISTS t1, t2;
-DROP FUNCTION IF EXISTS f1;
-DROP FUNCTION IF EXISTS f2;
-DROP PROCEDURE IF EXISTS p1;
-DROP PROCEDURE IF EXISTS p2;
-DROP TRIGGER IF EXISTS tr1;
-DROP TRIGGER IF EXISTS tr2;
-DROP VIEW IF EXISTS v1, v2;
-#
# Test injecting binlog write error when executing queries
#
set @saved_dbug = @@global.debug_dbug;
@@ -111,15 +100,13 @@ ERROR HY000: Error writing file 'master-bin' ((errno: #)
set @@global.debug_dbug = @saved_dbug;
set @saved_dbug = @@global.debug_dbug;
SET GLOBAL debug_dbug='d,injecting_fault_writing';
+SET PASSWORD FOR user1=PASSWORD('foobar');
+SET PASSWORD FOR user1=PASSWORD('foobar');
+ERROR HY000: Error writing file 'master-bin' ((errno: #)
+set @@global.debug_dbug = @saved_dbug;
+set @saved_dbug = @@global.debug_dbug;
+SET GLOBAL debug_dbug='d,injecting_fault_writing';
DROP USER user1;
DROP USER user1;
ERROR HY000: Error writing file 'master-bin' ((errno: #)
set @@global.debug_dbug = @saved_dbug;
-#
-# Cleanup
-#
-DROP TABLE IF EXISTS t1, t2;
-DROP FUNCTION IF EXISTS f1;
-DROP PROCEDURE IF EXISTS p1;
-DROP TRIGGER IF EXISTS tr1;
-DROP VIEW IF EXISTS v1, v2;
diff --git a/mysql-test/suite/binlog/r/binlog_xa_recover.result b/mysql-test/suite/binlog/r/binlog_xa_recover.result
index 25aa1389b71..f5060fd5160 100644
--- a/mysql-test/suite/binlog/r/binlog_xa_recover.result
+++ b/mysql-test/suite/binlog/r/binlog_xa_recover.result
@@ -146,29 +146,12 @@ master-bin.000004 # Xid # # COMMIT /* XID */
SET DEBUG_SYNC= "now SIGNAL con10_cont";
connection con10;
connection default;
-SET @old_dbug= @@global.DEBUG_DBUG;
-SET GLOBAL debug_dbug="+d,binlog_background_checkpoint_processed";
SET DEBUG_SYNC= "now SIGNAL con12_cont";
connection con12;
connection default;
-SET DEBUG_SYNC= "now WAIT_FOR binlog_background_checkpoint_processed";
-SET GLOBAL debug_dbug= @old_dbug;
SET DEBUG_SYNC= "now SIGNAL con11_cont";
connection con11;
connection default;
-Checking that master-bin.000004 is the last binlog checkpoint
-include/show_binlog_events.inc
-Log_name Pos Event_type Server_id End_log_pos Info
-master-bin.000004 # Format_desc # # SERVER_VERSION, BINLOG_VERSION
-master-bin.000004 # Gtid_list # # [#-#-#]
-master-bin.000004 # Binlog_checkpoint # # master-bin.000001
-master-bin.000004 # Gtid # # BEGIN GTID #-#-#
-master-bin.000004 # Annotate_rows # # INSERT INTO t1 VALUES (13, NULL)
-master-bin.000004 # Table_map # # table_id: # (test.t1)
-master-bin.000004 # Write_rows_v1 # # table_id: # flags: STMT_END_F
-master-bin.000004 # Xid # # COMMIT /* XID */
-master-bin.000004 # Binlog_checkpoint # # master-bin.000002
-master-bin.000004 # Binlog_checkpoint # # master-bin.000004
Now crash the server
SET SESSION debug_dbug="+d,crash_commit_after_log";
INSERT INTO t1 VALUES (14, NULL);
diff --git a/mysql-test/suite/binlog/t/binlog_checkpoint_flush_logs.test b/mysql-test/suite/binlog/t/binlog_checkpoint_flush_logs.test
new file mode 100644
index 00000000000..e21f1eb308d
--- /dev/null
+++ b/mysql-test/suite/binlog/t/binlog_checkpoint_flush_logs.test
@@ -0,0 +1,79 @@
+--source include/have_innodb.inc
+--source include/have_debug.inc
+--source include/have_debug_sync.inc
+--source include/have_binlog_format_row.inc
+
+# References:
+#
+# MDEV-24526 binlog rotate via FLUSH LOGS may obsolete binlog file too early
+#
+# The test for MDEV-24526 proves that the fix correctly handles the observed
+# race condition between a committing transaction and FLUSH LOGS.
+# The scenario:
+# Trx_1 (con1) writes its transaction to the binlog first, then yields its
+# turn at acquiring LOCK_commit_ordered to Trx_2 and stands still, waiting
+# for a signal that will never arrive.
+# In the fixed version Trx_2 cannot acquire the lock, even though Trx_3
+# makes sure Trx_2 has reached a post-rotation execution point and signals
+# it to proceed.
+# The server is then crashed, and Trx_1 must be recovered, unlike in the
+# old, buggy version.
+#
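+# A note on the DEBUG_SYNC choreography used below (these are the general
+# mechanics of the facility, not something introduced by this fix):
+#   SET DEBUG_SYNC= "<sync point> SIGNAL s WAIT_FOR w"
+# makes the thread that next reaches <sync point> emit signal "s" and block
+# until another connection runs SET DEBUG_SYNC= "now SIGNAL w". This is how
+# the test parks Trx_1 just before LOCK_commit_ordered and only then lets
+# Trx_2's FLUSH LOGS proceed.
+#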
+SET GLOBAL innodb_flush_log_at_trx_commit= 1;
+RESET MASTER;
+
+CREATE TABLE t1 (a INT PRIMARY KEY, b MEDIUMTEXT) ENGINE=Innodb;
+
+--echo *** Test that FLUSH LOGS waits if a transaction ordered commit is in progress.
+
+connect(con1,localhost,root,,); # Trx_1
+# hang before acquiring the Commit Ordered mutex
+SET DEBUG_SYNC= "commit_before_get_LOCK_commit_ordered SIGNAL con1_ready WAIT_FOR con1_go";
+
+--send INSERT INTO t1 VALUES (1, REPEAT("x", 1))
+
+connection default; # Trx_2
+
+SET DEBUG_SYNC= "now WAIT_FOR con1_ready";
+SET DEBUG_SYNC= "rotate_after_rotate SIGNAL con_flush_ready WAIT_FOR default_go";
+--send FLUSH LOGS
+
+connect(con2,localhost,root,,); # Trx_3
+--echo Trx_1 is not yet committed:
+SELECT count(*) as 'ZERO' from t1;
+
+--echo Wait until Trx_2 has rotated the binlog:
+SET DEBUG_SYNC= "now WAIT_FOR con_flush_ready";
+# A deliberately useless signal to prove that Trx_2 cannot race ahead of
+# Trx_1's commit, even though Trx_1 never receives the awaited 'con1_go'.
+SET DEBUG_SYNC= "now SIGNAL default_go";
+
+--let $shutdown_timeout=0
+--source include/restart_mysqld.inc
+
+connection default;
+--enable_reconnect
+--error 0,2013
+--reap
+
+--echo Must be three logs in the list:
+--source include/show_binary_logs.inc
+--let $binlog_file= master-bin.000001
+--let $binlog_start= 4
+--source include/show_binlog_events.inc
+
+--echo Only one Binlog checkpoint must exist and point to master-bin.000001
+--let $binlog_file= master-bin.000002
+--let $binlog_start= 4
+--source include/show_binlog_events.inc
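+
+# Per the scenario described at the top of this file, the checkpoint must
+# still name master-bin.000001: Trx_1 was written to that binlog but had not
+# yet been durably committed in the engine when FLUSH LOGS rotated, so the
+# new binlog must not declare master-bin.000001 obsolete.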
+
+
+# In the buggy server version the following select may have
+# resulted in ZERO:
+SELECT count(*) as 'ONE' from t1;
+
+# Clean up.
+connection default;
+
+DROP TABLE t1;
+SET debug_sync = 'reset';
diff --git a/mysql-test/suite/binlog/t/binlog_spurious_ddl_errors-master.opt b/mysql-test/suite/binlog/t/binlog_spurious_ddl_errors-master.opt
deleted file mode 100644
index 627becdbfb5..00000000000
--- a/mysql-test/suite/binlog/t/binlog_spurious_ddl_errors-master.opt
+++ /dev/null
@@ -1 +0,0 @@
---innodb
diff --git a/mysql-test/suite/binlog/t/binlog_spurious_ddl_errors.test b/mysql-test/suite/binlog/t/binlog_spurious_ddl_errors.test
index e64e7838a31..29a860764a9 100644
--- a/mysql-test/suite/binlog/t/binlog_spurious_ddl_errors.test
+++ b/mysql-test/suite/binlog/t/binlog_spurious_ddl_errors.test
@@ -71,7 +71,6 @@ DROP TABLE t_row;
SET binlog_format = ROW;
CREATE TABLE t_stmt (a VARCHAR(100)) ENGINE = EXAMPLE;
---error ER_NOT_SUPPORTED_YET
ALTER TABLE t_stmt ADD COLUMN b INT;
CREATE TRIGGER trig_stmt BEFORE INSERT ON t_stmt FOR EACH ROW INSERT INTO t_stmt VALUES (1);
diff --git a/mysql-test/suite/binlog/t/binlog_statement_insert_delayed.test b/mysql-test/suite/binlog/t/binlog_statement_insert_delayed.test
index b2af560fa50..9145afc047f 100644
--- a/mysql-test/suite/binlog/t/binlog_statement_insert_delayed.test
+++ b/mysql-test/suite/binlog/t/binlog_statement_insert_delayed.test
@@ -10,3 +10,4 @@ disable_query_log;
call mtr.add_suppression("Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT");
enable_query_log;
-- source include/binlog_insert_delayed.test
+reset master;
diff --git a/mysql-test/suite/binlog/t/binlog_truncate_innodb-master.opt b/mysql-test/suite/binlog/t/binlog_truncate_innodb-master.opt
deleted file mode 100644
index 69cc489a969..00000000000
--- a/mysql-test/suite/binlog/t/binlog_truncate_innodb-master.opt
+++ /dev/null
@@ -1 +0,0 @@
---loose-innodb
\ No newline at end of file
diff --git a/mysql-test/suite/binlog/t/binlog_truncate_innodb.test b/mysql-test/suite/binlog/t/binlog_truncate_innodb.test
index 54a32b96ef1..511b82bd717 100644
--- a/mysql-test/suite/binlog/t/binlog_truncate_innodb.test
+++ b/mysql-test/suite/binlog/t/binlog_truncate_innodb.test
@@ -41,4 +41,5 @@ source include/binlog_truncate.test;
let $before_truncate = SET TRANSACTION ISOLATION LEVEL SERIALIZABLE;
source include/binlog_truncate.test;
-SET BINLOG_FORMAT=@old_binlog_format;
+SET @@global.binlog_format = @old_binlog_format;
+SET @@session.binlog_format = @old_binlog_format;
\ No newline at end of file
diff --git a/mysql-test/suite/binlog/t/binlog_write_error.test b/mysql-test/suite/binlog/t/binlog_write_error.test
index d66efb0bcf3..2c55173c2a1 100644
--- a/mysql-test/suite/binlog/t/binlog_write_error.test
+++ b/mysql-test/suite/binlog/t/binlog_write_error.test
@@ -1 +1,82 @@
---source include/binlog_write_error.inc
+#
+# This file is included by binlog_encryption.binlog_write_error
+# Please check all dependent tests after modifying it
+#
+
+#
+# === Name ===
+#
+# binlog_write_error.test
+#
+# === Description ===
+#
+# This test case checks whether an error writing the binlog file is
+# properly reported and handled when executing statements.
+#
+# === Related Bugs ===
+#
+# BUG#37148
+#
+
+source include/have_debug.inc;
+source include/have_binlog_format_mixed_or_statement.inc;
+
+--echo #
+--echo # Test injecting binlog write error when executing queries
+--echo #
+
+let $query= CREATE TABLE t1 (a INT);
+source include/binlog_inject_error.inc;
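+# (binlog_inject_error.inc, judging by the .result diff updated in this same
+# patch: it saves @@global.debug_dbug, enables the 'injecting_fault_writing'
+# debug fault, executes $query expecting the "Error writing file
+# 'master-bin'" failure, and then restores debug_dbug.)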
+
+INSERT INTO t1 VALUES (1),(2),(3);
+
+let $query= INSERT INTO t1 VALUES (4),(5),(6);
+source include/binlog_inject_error.inc;
+
+let $query= UPDATE t1 set a=a+1;
+source include/binlog_inject_error.inc;
+
+let $query= DELETE FROM t1;
+source include/binlog_inject_error.inc;
+
+let $query= CREATE TRIGGER tr1 AFTER INSERT ON t1 FOR EACH ROW INSERT INTO t1 VALUES (new.a + 100);
+source include/binlog_inject_error.inc;
+
+let $query= DROP TRIGGER tr1;
+source include/binlog_inject_error.inc;
+
+let $query= ALTER TABLE t1 ADD (b INT);
+source include/binlog_inject_error.inc;
+
+let $query= CREATE VIEW v1 AS SELECT a FROM t1;
+source include/binlog_inject_error.inc;
+
+let $query= DROP VIEW v1;
+source include/binlog_inject_error.inc;
+
+let $query= CREATE PROCEDURE p1(OUT rows_cnt INT) SELECT count(*) INTO rows_cnt FROM t1;
+source include/binlog_inject_error.inc;
+
+let $query= DROP PROCEDURE p1;
+source include/binlog_inject_error.inc;
+
+let $query= DROP TABLE t1;
+source include/binlog_inject_error.inc;
+
+let $query= CREATE FUNCTION f1() RETURNS INT return 1;
+source include/binlog_inject_error.inc;
+
+let $query= DROP FUNCTION f1;
+source include/binlog_inject_error.inc;
+
+let $query= CREATE USER user1;
+source include/binlog_inject_error.inc;
+
+let $query= REVOKE ALL PRIVILEGES, GRANT OPTION FROM user1;
+source include/binlog_inject_error.inc;
+
+let $query= SET PASSWORD FOR user1=PASSWORD('foobar');
+source include/binlog_inject_error.inc;
+
+let $query= DROP USER user1;
+source include/binlog_inject_error.inc;
diff --git a/mysql-test/suite/binlog/t/binlog_xa_recover-master.opt b/mysql-test/suite/binlog/t/binlog_xa_recover.opt
index 3c44f9fad10..3c44f9fad10 100644
--- a/mysql-test/suite/binlog/t/binlog_xa_recover-master.opt
+++ b/mysql-test/suite/binlog/t/binlog_xa_recover.opt
diff --git a/mysql-test/suite/binlog/t/binlog_xa_recover.test b/mysql-test/suite/binlog/t/binlog_xa_recover.test
index 411394d4bef..3b2a7e45392 100644
--- a/mysql-test/suite/binlog/t/binlog_xa_recover.test
+++ b/mysql-test/suite/binlog/t/binlog_xa_recover.test
@@ -1 +1,277 @@
---source include/binlog_xa_recover.inc
+#
+# This include file is used by more than one test suite
+# (currently binlog and binlog_encryption).
+# Please check all dependent tests after modifying it
+#
+
+--source include/have_innodb.inc
+--source include/have_debug.inc
+--source include/have_debug_sync.inc
+--source include/have_binlog_format_row.inc
+--source include/have_perfschema.inc
+# Valgrind does not work well with tests that crash the server
+--source include/not_valgrind.inc
+
+# (We do not need to restore these settings, as we crash the server).
+SET GLOBAL max_binlog_size= 4096;
+SET GLOBAL innodb_flush_log_at_trx_commit= 1;
+RESET MASTER;
+
+CREATE TABLE t1 (a INT PRIMARY KEY, b MEDIUMTEXT) ENGINE=Innodb;
+# Insert some data to force a few binlog rotations (3), so we get some
+# normal binlog checkpoints before starting the test.
+INSERT INTO t1 VALUES (100, REPEAT("x", 4100));
+# Wait for the master-bin.000002 binlog checkpoint to appear.
+--let $wait_for_all= 0
+--let $show_statement= SHOW BINLOG EVENTS IN "master-bin.000002"
+--let $field= Info
+--let $condition= = "master-bin.000002"
+--source include/wait_show_condition.inc
+INSERT INTO t1 VALUES (101, REPEAT("x", 4100));
+--let $wait_for_all= 0
+--let $show_statement= SHOW BINLOG EVENTS IN "master-bin.000003"
+--let $field= Info
+--let $condition= = "master-bin.000003"
+--source include/wait_show_condition.inc
+INSERT INTO t1 VALUES (102, REPEAT("x", 4100));
+--let $wait_for_all= 0
+--let $show_statement= SHOW BINLOG EVENTS IN "master-bin.000004"
+--let $field= Info
+--let $condition= = "master-bin.000004"
+--source include/wait_show_condition.inc
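+#
+# (wait_show_condition.inc re-runs $show_statement until the value of column
+# $field in some returned row satisfies $condition, i.e. until the
+# Binlog_checkpoint event naming the given file shows up in that binlog.)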
+
+# Now start a bunch of transactions that span multiple binlog
+# files. Leave them in the prepared-but-not-committed state in the engine
+# and crash the server. Check that crash recovery is able to recover all
+# of them.
+#
+# We use debug_sync to get all the transactions into the prepared state
+# before we commit any of them. This is because the prepare step flushes
+# the InnoDB redo log - including any commits made before it - so recovery
+# would become unnecessary, decreasing the value of this test.
+#
+# We arrange to have con1 with a prepared transaction in master-bin.000004,
+# con2 and con3 with a prepared transaction in master-bin.000005, and a new
+# empty master-bin.000006. So the latest binlog checkpoint should be
+# master-bin.000006.
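+#
+# (Background note on binlog checkpoints in general, not something added by
+# this patch: a Binlog_checkpoint event naming binlog file N means that all
+# transactions written to binlogs older than N are already durable in the
+# storage engine, so crash recovery only has to scan from N onwards. Keeping
+# transactions prepared-but-uncommitted therefore delays the checkpoint,
+# which is what this test relies on.)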
+
+connect(con1,localhost,root,,);
+# First wait after prepare and before write to binlog.
+SET DEBUG_SYNC= "ha_commit_trans_before_log_and_order SIGNAL con1_wait WAIT_FOR con1_cont";
+# Then complete InnoDB commit in memory (but not commit checkpoint / write to
+# disk), and hang until crash, leaving a transaction to be XA recovered.
+SET DEBUG_SYNC= "commit_after_group_release_commit_ordered SIGNAL con1_ready WAIT_FOR _ever";
+send INSERT INTO t1 VALUES (1, REPEAT("x", 4100));
+
+connection default;
+SET DEBUG_SYNC= "now WAIT_FOR con1_wait";
+
+connect(con2,localhost,root,,);
+SET DEBUG_SYNC= "ha_commit_trans_before_log_and_order SIGNAL con2_wait WAIT_FOR con2_cont";
+SET DEBUG_SYNC= "commit_after_group_release_commit_ordered SIGNAL con2_ready WAIT_FOR _ever";
+send INSERT INTO t1 VALUES (2, NULL);
+
+connection default;
+SET DEBUG_SYNC= "now WAIT_FOR con2_wait";
+
+connect(con3,localhost,root,,);
+SET DEBUG_SYNC= "ha_commit_trans_before_log_and_order SIGNAL con3_wait WAIT_FOR con3_cont";
+SET DEBUG_SYNC= "commit_after_group_release_commit_ordered SIGNAL con3_ready WAIT_FOR _ever";
+send INSERT INTO t1 VALUES (3, REPEAT("x", 4100));
+
+connection default;
+SET DEBUG_SYNC= "now WAIT_FOR con3_wait";
+
+connect(con4,localhost,root,,);
+SET DEBUG_SYNC= "ha_commit_trans_before_log_and_order SIGNAL con4_wait WAIT_FOR con4_cont";
+SET SESSION debug_dbug="+d,crash_commit_after_log";
+send INSERT INTO t1 VALUES (4, NULL);
+
+connection default;
+SET DEBUG_SYNC= "now WAIT_FOR con4_wait";
+
+SET DEBUG_SYNC= "now SIGNAL con1_cont";
+SET DEBUG_SYNC= "now WAIT_FOR con1_ready";
+SET DEBUG_SYNC= "now SIGNAL con2_cont";
+SET DEBUG_SYNC= "now WAIT_FOR con2_ready";
+SET DEBUG_SYNC= "now SIGNAL con3_cont";
+SET DEBUG_SYNC= "now WAIT_FOR con3_ready";
+
+# Check that everything is committed in binary log.
+--source include/show_binary_logs.inc
+--let $binlog_file= master-bin.000003
+--let $binlog_start= 4
+--source include/show_binlog_events.inc
+--let $binlog_file= master-bin.000004
+--source include/show_binlog_events.inc
+--let $binlog_file= master-bin.000005
+--source include/show_binlog_events.inc
+--let $binlog_file= master-bin.000006
+--source include/show_binlog_events.inc
+
+
+# Check that server will not purge too much.
+PURGE BINARY LOGS TO "master-bin.000006";
+--source include/show_binary_logs.inc
+
+# Now crash the server with one more transaction in prepared state.
+--write_file $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
+wait-binlog_xa_recover.test
+EOF
+--error 0,2006,2013
+SET DEBUG_SYNC= "now SIGNAL con4_cont";
+connection con4;
+--error 2006,2013
+reap;
+
+--append_file $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
+restart-group_commit_binlog_pos.test
+EOF
+
+connection default;
+--enable_reconnect
+--source include/wait_until_connected_again.inc
+
+# Check that all transactions are recovered.
+SELECT a FROM t1 ORDER BY a;
+
+--echo Test that with multiple binlog checkpoints, recovery starts from the last one.
+SET GLOBAL max_binlog_size= 4096;
+SET GLOBAL innodb_flush_log_at_trx_commit= 1;
+RESET MASTER;
+
+# Rotate to binlog master-bin.000003 while delaying binlog checkpoints.
+# So we get multiple binlog checkpoints in master-bin.000003.
+# Then complete the checkpoints, crash, and check that we only scan
+# the necessary binlog file (ie. that we use the _last_ checkpoint).
+
+connect(con10,localhost,root,,);
+SET DEBUG_SYNC= "commit_after_group_release_commit_ordered SIGNAL con10_ready WAIT_FOR con10_cont";
+send INSERT INTO t1 VALUES (10, REPEAT("x", 4100));
+
+connection default;
+SET DEBUG_SYNC= "now WAIT_FOR con10_ready";
+
+connect(con11,localhost,root,,);
+SET DEBUG_SYNC= "commit_after_group_release_commit_ordered SIGNAL con11_ready WAIT_FOR con11_cont";
+send INSERT INTO t1 VALUES (11, REPEAT("x", 4100));
+
+connection default;
+SET DEBUG_SYNC= "now WAIT_FOR con11_ready";
+
+connect(con12,localhost,root,,);
+SET DEBUG_SYNC= "commit_after_group_release_commit_ordered SIGNAL con12_ready WAIT_FOR con12_cont";
+send INSERT INTO t1 VALUES (12, REPEAT("x", 4100));
+
+connection default;
+SET DEBUG_SYNC= "now WAIT_FOR con12_ready";
+INSERT INTO t1 VALUES (13, NULL);
+
+--source include/show_binary_logs.inc
+--let $binlog_file= master-bin.000004
+--let $binlog_start= 4
+--source include/show_binlog_events.inc
+
+SET DEBUG_SYNC= "now SIGNAL con10_cont";
+connection con10;
+reap;
+connection default;
+
+# We need to sync the test case with the background processing of the
+# commit checkpoint, otherwise we get nondeterministic results.
+let $wait_condition= select count(*) = 1 from performance_schema.threads where processlist_state = "Waiting for background binlog tasks";
+--source include/wait_condition.inc
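+# (wait_condition.inc re-evaluates the SELECT in $wait_condition until it
+# returns a true value or the wait times out; per the comment above, the
+# test resumes only once the binlog background thread reports that it is
+# waiting for new tasks, i.e. the commit checkpoint has been processed.)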
+
+SET DEBUG_SYNC= "now SIGNAL con12_cont";
+connection con12;
+reap;
+connection default;
+
+SET DEBUG_SYNC= "now SIGNAL con11_cont";
+connection con11;
+reap;
+
+connection default;
+# Wait for the last (master-bin.000004) binlog checkpoint to appear.
+--let $wait_for_all= 0
+--let $show_statement= SHOW BINLOG EVENTS IN "master-bin.000004"
+--let $field= Info
+--let $condition= = "master-bin.000004"
+--source include/wait_show_condition.inc
+
+--echo Now crash the server
+# It is not too easy to test XA recovery, as it runs early during server
+# startup, before any connections can be made.
+# What we do is set a DBUG error insert which will crash if XA recovery
+# starts from any other binlog than master-bin.000004 (check the file
+# binlog_xa_recover.opt). Then we will fail here if XA recovery
+# would start from the wrong place.
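+# (For reference, that .opt file - unchanged in content by this patch, only
+# renamed - passes --skip-stack-trace --skip-core-file and
+# --loose-debug-dbug=+d,xa_recover_expect_master_bin_000004.)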
+--write_file $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
+wait-binlog_xa_recover.test
+EOF
+SET SESSION debug_dbug="+d,crash_commit_after_log";
+--error 2006,2013
+INSERT INTO t1 VALUES (14, NULL);
+
+--append_file $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
+restart-group_commit_binlog_pos.test
+EOF
+
+connection default;
+--enable_reconnect
+--source include/wait_until_connected_again.inc
+
+# Check that all transactions are recovered.
+SELECT a FROM t1 ORDER BY a;
+
+
+--echo *** Check that recovery works if we crashed early during rotate, before
+--echo *** binlog checkpoint event could be written.
+
+SET GLOBAL max_binlog_size= 4096;
+SET GLOBAL innodb_flush_log_at_trx_commit= 1;
+RESET MASTER;
+
+# We need some initial data to reach binlog master-bin.000004. Otherwise
+# crash recovery fails due to the error insert used for the previous test.
+INSERT INTO t1 VALUES (21, REPEAT("x", 4100));
+INSERT INTO t1 VALUES (22, REPEAT("x", 4100));
+# Wait for the master-bin.000003 binlog checkpoint to appear.
+--let $wait_for_all= 0
+--let $show_statement= SHOW BINLOG EVENTS IN "master-bin.000003"
+--let $field= Info
+--let $condition= = "master-bin.000003"
+--source include/wait_show_condition.inc
+INSERT INTO t1 VALUES (23, REPEAT("x", 4100));
+# Wait for the last (master-bin.000004) binlog checkpoint to appear.
+--let $wait_for_all= 0
+--let $show_statement= SHOW BINLOG EVENTS IN "master-bin.000004"
+--let $field= Info
+--let $condition= = "master-bin.000004"
+--source include/wait_show_condition.inc
+
+--write_file $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
+wait-binlog_xa_recover.test
+EOF
+SET SESSION debug_dbug="+d,crash_before_write_checkpoint_event";
+--error 2006,2013
+INSERT INTO t1 VALUES (24, REPEAT("x", 4100));
+
+--append_file $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
+restart-group_commit_binlog_pos.test
+EOF
+
+--enable_reconnect
+--source include/wait_until_connected_again.inc
+
+# Check that all transactions are recovered.
+SELECT a FROM t1 ORDER BY a;
+
+--source include/show_binary_logs.inc
+--let $binlog_file= master-bin.000004
+--let $binlog_start= 4
+--source include/show_binlog_events.inc
+
+# Cleanup
+connection default;
+DROP TABLE t1;
diff --git a/mysql-test/suite/binlog_encryption/binlog_write_error.result b/mysql-test/suite/binlog_encryption/binlog_write_error.result
index 6e8a212035a..2ee68465243 100644
--- a/mysql-test/suite/binlog_encryption/binlog_write_error.result
+++ b/mysql-test/suite/binlog_encryption/binlog_write_error.result
@@ -1,15 +1,4 @@
#
-# Initialization
-#
-DROP TABLE IF EXISTS t1, t2;
-DROP FUNCTION IF EXISTS f1;
-DROP FUNCTION IF EXISTS f2;
-DROP PROCEDURE IF EXISTS p1;
-DROP PROCEDURE IF EXISTS p2;
-DROP TRIGGER IF EXISTS tr1;
-DROP TRIGGER IF EXISTS tr2;
-DROP VIEW IF EXISTS v1, v2;
-#
# Test injecting binlog write error when executing queries
#
set @saved_dbug = @@global.debug_dbug;
@@ -111,15 +100,13 @@ ERROR HY000: Error writing file 'master-bin' ((errno: #)
set @@global.debug_dbug = @saved_dbug;
set @saved_dbug = @@global.debug_dbug;
SET GLOBAL debug_dbug='d,injecting_fault_writing';
+SET PASSWORD FOR user1=PASSWORD('foobar');
+SET PASSWORD FOR user1=PASSWORD('foobar');
+ERROR HY000: Error writing file 'master-bin' ((errno: #)
+set @@global.debug_dbug = @saved_dbug;
+set @saved_dbug = @@global.debug_dbug;
+SET GLOBAL debug_dbug='d,injecting_fault_writing';
DROP USER user1;
DROP USER user1;
ERROR HY000: Error writing file 'master-bin' ((errno: #)
set @@global.debug_dbug = @saved_dbug;
-#
-# Cleanup
-#
-DROP TABLE IF EXISTS t1, t2;
-DROP FUNCTION IF EXISTS f1;
-DROP PROCEDURE IF EXISTS p1;
-DROP TRIGGER IF EXISTS tr1;
-DROP VIEW IF EXISTS v1, v2;
diff --git a/mysql-test/suite/binlog_encryption/binlog_write_error.test b/mysql-test/suite/binlog_encryption/binlog_write_error.test
index 68e59655eab..8add24da984 100644
--- a/mysql-test/suite/binlog_encryption/binlog_write_error.test
+++ b/mysql-test/suite/binlog_encryption/binlog_write_error.test
@@ -1 +1 @@
---source suite/binlog/include/binlog_write_error.inc
+--source suite/binlog/t/binlog_write_error.test
diff --git a/mysql-test/suite/binlog_encryption/binlog_xa_recover-master.opt b/mysql-test/suite/binlog_encryption/binlog_xa_recover-master.opt
deleted file mode 100644
index 3c44f9fad10..00000000000
--- a/mysql-test/suite/binlog_encryption/binlog_xa_recover-master.opt
+++ /dev/null
@@ -1 +0,0 @@
---skip-stack-trace --skip-core-file --loose-debug-dbug=+d,xa_recover_expect_master_bin_000004
diff --git a/mysql-test/suite/binlog_encryption/binlog_xa_recover.result b/mysql-test/suite/binlog_encryption/binlog_xa_recover.result
index af36fe277a1..3e4ed42cf7c 100644
--- a/mysql-test/suite/binlog_encryption/binlog_xa_recover.result
+++ b/mysql-test/suite/binlog_encryption/binlog_xa_recover.result
@@ -151,30 +151,12 @@ master-bin.000004 # Xid # # COMMIT /* XID */
SET DEBUG_SYNC= "now SIGNAL con10_cont";
connection con10;
connection default;
-SET @old_dbug= @@global.DEBUG_DBUG;
-SET GLOBAL debug_dbug="+d,binlog_background_checkpoint_processed";
SET DEBUG_SYNC= "now SIGNAL con12_cont";
connection con12;
connection default;
-SET DEBUG_SYNC= "now WAIT_FOR binlog_background_checkpoint_processed";
-SET GLOBAL debug_dbug= @old_dbug;
SET DEBUG_SYNC= "now SIGNAL con11_cont";
connection con11;
connection default;
-Checking that master-bin.000004 is the last binlog checkpoint
-include/show_binlog_events.inc
-Log_name Pos Event_type Server_id End_log_pos Info
-master-bin.000004 # Format_desc # # SERVER_VERSION, BINLOG_VERSION
-master-bin.000004 # Start_encryption # #
-master-bin.000004 # Gtid_list # # [#-#-#]
-master-bin.000004 # Binlog_checkpoint # # master-bin.000001
-master-bin.000004 # Gtid # # BEGIN GTID #-#-#
-master-bin.000004 # Annotate_rows # # INSERT INTO t1 VALUES (13, NULL)
-master-bin.000004 # Table_map # # table_id: # (test.t1)
-master-bin.000004 # Write_rows_v1 # # table_id: # flags: STMT_END_F
-master-bin.000004 # Xid # # COMMIT /* XID */
-master-bin.000004 # Binlog_checkpoint # # master-bin.000002
-master-bin.000004 # Binlog_checkpoint # # master-bin.000004
Now crash the server
SET SESSION debug_dbug="+d,crash_commit_after_log";
INSERT INTO t1 VALUES (14, NULL);
diff --git a/mysql-test/suite/binlog_encryption/binlog_xa_recover.test b/mysql-test/suite/binlog_encryption/binlog_xa_recover.test
index e603ccc30fe..c03adb6aff2 100644
--- a/mysql-test/suite/binlog_encryption/binlog_xa_recover.test
+++ b/mysql-test/suite/binlog_encryption/binlog_xa_recover.test
@@ -1 +1 @@
---source suite/binlog/include/binlog_xa_recover.inc
+--source suite/binlog/t/binlog_xa_recover.test
diff --git a/mysql-test/suite/binlog_encryption/rpl_semi_sync.result b/mysql-test/suite/binlog_encryption/rpl_semi_sync.result
index 106efb555d3..d18bd1efda7 100644
--- a/mysql-test/suite/binlog_encryption/rpl_semi_sync.result
+++ b/mysql-test/suite/binlog_encryption/rpl_semi_sync.result
@@ -164,20 +164,15 @@ connection slave;
connection slave;
include/stop_slave.inc
connection master;
+include/kill_binlog_dump_threads.inc
set global rpl_semi_sync_master_timeout= 5000;
[ master status should be ON ]
-show status like 'Rpl_semi_sync_master_status';
-Variable_name Value
-Rpl_semi_sync_master_status ON
show status like 'Rpl_semi_sync_master_no_tx';
Variable_name Value
Rpl_semi_sync_master_no_tx 0
show status like 'Rpl_semi_sync_master_yes_tx';
Variable_name Value
Rpl_semi_sync_master_yes_tx 14
-show status like 'Rpl_semi_sync_master_clients';
-Variable_name Value
-Rpl_semi_sync_master_clients 0
[ semi-sync replication of these transactions will fail ]
insert into t1 values (500);
[ master status should be OFF ]
@@ -235,9 +230,6 @@ max(a)
500
connection master;
[ master status should be ON again after slave catches up ]
-show status like 'Rpl_semi_sync_master_status';
-Variable_name Value
-Rpl_semi_sync_master_status ON
show status like 'Rpl_semi_sync_master_no_tx';
Variable_name Value
Rpl_semi_sync_master_no_tx 12
@@ -304,8 +296,6 @@ connection master;
create table t1 (a int) engine = ENGINE_TYPE;
drop table t1;
connection slave;
-show status like 'Rpl_relay%';
-Variable_name Value
[ test reset master ]
connection master;
reset master;
@@ -321,7 +311,7 @@ Rpl_semi_sync_master_yes_tx 0
connection slave;
include/stop_slave.inc
reset slave;
-connection master;
+include/kill_binlog_dump_threads.inc
connection slave;
include/start_slave.inc
connection master;
@@ -353,6 +343,7 @@ include/stop_slave.inc
reset slave;
connection master;
reset master;
+include/kill_binlog_dump_threads.inc
set sql_log_bin=0;
grant replication slave on *.* to rpl@127.0.0.1 identified by 'rpl_password';
flush privileges;
@@ -403,10 +394,8 @@ SHOW STATUS LIKE 'Rpl_semi_sync_slave_status';
Variable_name Value
Rpl_semi_sync_slave_status OFF
connection master;
+include/kill_binlog_dump_threads.inc
[ Semi-sync status on master should be ON ]
-show status like 'Rpl_semi_sync_master_clients';
-Variable_name Value
-Rpl_semi_sync_master_clients 0
show status like 'Rpl_semi_sync_master_status';
Variable_name Value
Rpl_semi_sync_master_status ON
diff --git a/mysql-test/suite/compat/oracle/r/table_value_constr.result b/mysql-test/suite/compat/oracle/r/table_value_constr.result
index 4383845cd87..cb198764538 100644
--- a/mysql-test/suite/compat/oracle/r/table_value_constr.result
+++ b/mysql-test/suite/compat/oracle/r/table_value_constr.result
@@ -746,7 +746,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
3 MATERIALIZED <derived2> ALL NULL NULL NULL NULL 2 100.00
2 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
-Note 1003 select "test"."t1"."a" AS "a","test"."t1"."b" AS "b" from "test"."t1" semi join ((values (1)) "tvc_0") where 1
+Note 1003 /* select#1 */ select "test"."t1"."a" AS "a","test"."t1"."b" AS "b" from "test"."t1" semi join ((values (1)) "tvc_0") where 1
explain extended select * from t1
where a in (select * from (values (1)) as tvc_0);
id select_type table type possible_keys key key_len ref rows filtered Extra
@@ -981,7 +981,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
3 MATERIALIZED <derived2> ALL NULL NULL NULL NULL 2 100.00
2 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
-Note 1003 select "test"."t1"."a" AS "a","test"."t1"."b" AS "b" from "test"."t1" semi join ((values (1),(2)) "tvc_0") where 1
+Note 1003 /* select#1 */ select "test"."t1"."a" AS "a","test"."t1"."b" AS "b" from "test"."t1" semi join ((values (1),(2)) "tvc_0") where 1
explain extended select * from t1
where a = any (select * from (values (1),(2)) as tvc_0);
id select_type table type possible_keys key key_len ref rows filtered Extra
diff --git a/mysql-test/suite/encryption/r/innodb-checksum-algorithm.result b/mysql-test/suite/encryption/r/innodb-checksum-algorithm.result
index ff42e975b8d..8eac34ab399 100644
--- a/mysql-test/suite/encryption/r/innodb-checksum-algorithm.result
+++ b/mysql-test/suite/encryption/r/innodb-checksum-algorithm.result
@@ -8,6 +8,8 @@ SET GLOBAL innodb_encrypt_tables = ON;
SET GLOBAL innodb_encryption_threads = 4;
call mtr.add_suppression("InnoDB: innodb_checksum_algorithm is set to \"strict_(crc32|none|innodb)\" but the page \\[page id: space=[0-9]+, page number=[0-9]+\\] contains a valid checksum \"(innodb|none|crc32)\"");
SET GLOBAL innodb_checksum_algorithm = innodb;
+Warnings:
+Warning 138 Setting innodb_checksum_algorithm to values other than crc32, full_crc32, strict_crc32 or strict_full_crc32 is UNSAFE and DEPRECATED. These deprecated values will be disallowed in MariaDB 10.6.
SET GLOBAL innodb_default_encryption_key_id=4;
SET GLOBAL innodb_checksum_algorithm=crc32;
create table tce_crc32(a serial, b blob, index(b(10))) engine=innodb
@@ -90,6 +92,8 @@ update tpe_crc32 set b=substr(b,1);
ALTER TABLE tp_crc32 IMPORT TABLESPACE;
update tp_crc32 set b=substr(b,1);
SET GLOBAL innodb_checksum_algorithm=innodb;
+Warnings:
+Warning 138 Setting innodb_checksum_algorithm to values other than crc32, full_crc32, strict_crc32 or strict_full_crc32 is UNSAFE and DEPRECATED. These deprecated values will be disallowed in MariaDB 10.6.
ALTER TABLE tce_crc32 DISCARD TABLESPACE;
ALTER TABLE tc_crc32 DISCARD TABLESPACE;
ALTER TABLE te_crc32 DISCARD TABLESPACE;
@@ -115,6 +119,8 @@ update tpe_crc32 set b=substr(b,1);
ALTER TABLE tp_crc32 IMPORT TABLESPACE;
update tp_crc32 set b=substr(b,1);
SET GLOBAL innodb_checksum_algorithm=none;
+Warnings:
+Warning 138 Setting innodb_checksum_algorithm to values other than crc32, full_crc32, strict_crc32 or strict_full_crc32 is UNSAFE and DEPRECATED. These deprecated values will be disallowed in MariaDB 10.6.
ALTER TABLE tce_crc32 DISCARD TABLESPACE;
ALTER TABLE tc_crc32 DISCARD TABLESPACE;
ALTER TABLE te_crc32 DISCARD TABLESPACE;
@@ -151,6 +157,8 @@ test.tp_crc32 check status OK
DROP TABLE tce_crc32, tc_crc32, te_crc32,
t_crc32, tpe_crc32, tp_crc32;
SET GLOBAL innodb_checksum_algorithm=innodb;
+Warnings:
+Warning 138 Setting innodb_checksum_algorithm to values other than crc32, full_crc32, strict_crc32 or strict_full_crc32 is UNSAFE and DEPRECATED. These deprecated values will be disallowed in MariaDB 10.6.
create table tce_innodb(a serial, b blob, index(b(10))) engine=innodb
ROW_FORMAT=COMPRESSED encrypted=yes;
create table tc_innodb(a serial, b blob, index(b(10))) engine=innodb
@@ -231,6 +239,8 @@ update tpe_innodb set b=substr(b,1);
ALTER TABLE tp_innodb IMPORT TABLESPACE;
update tp_innodb set b=substr(b,1);
SET GLOBAL innodb_checksum_algorithm=innodb;
+Warnings:
+Warning 138 Setting innodb_checksum_algorithm to values other than crc32, full_crc32, strict_crc32 or strict_full_crc32 is UNSAFE and DEPRECATED. These deprecated values will be disallowed in MariaDB 10.6.
ALTER TABLE tce_innodb DISCARD TABLESPACE;
ALTER TABLE tc_innodb DISCARD TABLESPACE;
ALTER TABLE te_innodb DISCARD TABLESPACE;
@@ -256,6 +266,8 @@ update tpe_innodb set b=substr(b,1);
ALTER TABLE tp_innodb IMPORT TABLESPACE;
update tp_innodb set b=substr(b,1);
SET GLOBAL innodb_checksum_algorithm=none;
+Warnings:
+Warning 138 Setting innodb_checksum_algorithm to values other than crc32, full_crc32, strict_crc32 or strict_full_crc32 is UNSAFE and DEPRECATED. These deprecated values will be disallowed in MariaDB 10.6.
ALTER TABLE tce_innodb DISCARD TABLESPACE;
ALTER TABLE tc_innodb DISCARD TABLESPACE;
ALTER TABLE te_innodb DISCARD TABLESPACE;
@@ -292,6 +304,8 @@ test.tp_innodb check status OK
DROP TABLE tce_innodb, tc_innodb, te_innodb,
t_innodb, tpe_innodb, tp_innodb;
SET GLOBAL innodb_checksum_algorithm=none;
+Warnings:
+Warning 138 Setting innodb_checksum_algorithm to values other than crc32, full_crc32, strict_crc32 or strict_full_crc32 is UNSAFE and DEPRECATED. These deprecated values will be disallowed in MariaDB 10.6.
create table tce_none(a serial, b blob, index(b(10))) engine=innodb
ROW_FORMAT=COMPRESSED encrypted=yes;
create table tc_none(a serial, b blob, index(b(10))) engine=innodb
@@ -372,6 +386,8 @@ update tpe_none set b=substr(b,1);
ALTER TABLE tp_none IMPORT TABLESPACE;
update tp_none set b=substr(b,1);
SET GLOBAL innodb_checksum_algorithm=innodb;
+Warnings:
+Warning 138 Setting innodb_checksum_algorithm to values other than crc32, full_crc32, strict_crc32 or strict_full_crc32 is UNSAFE and DEPRECATED. These deprecated values will be disallowed in MariaDB 10.6.
ALTER TABLE tce_none DISCARD TABLESPACE;
ALTER TABLE tc_none DISCARD TABLESPACE;
ALTER TABLE te_none DISCARD TABLESPACE;
@@ -397,6 +413,8 @@ update tpe_none set b=substr(b,1);
ALTER TABLE tp_none IMPORT TABLESPACE;
update tp_none set b=substr(b,1);
SET GLOBAL innodb_checksum_algorithm=none;
+Warnings:
+Warning 138 Setting innodb_checksum_algorithm to values other than crc32, full_crc32, strict_crc32 or strict_full_crc32 is UNSAFE and DEPRECATED. These deprecated values will be disallowed in MariaDB 10.6.
ALTER TABLE tce_none DISCARD TABLESPACE;
ALTER TABLE tc_none DISCARD TABLESPACE;
ALTER TABLE te_none DISCARD TABLESPACE;
diff --git a/mysql-test/suite/encryption/r/innodb_encrypt_freed.result b/mysql-test/suite/encryption/r/innodb_encrypt_freed.result
new file mode 100644
index 00000000000..c0d8b770403
--- /dev/null
+++ b/mysql-test/suite/encryption/r/innodb_encrypt_freed.result
@@ -0,0 +1,100 @@
+SHOW VARIABLES LIKE 'innodb_encrypt%';
+Variable_name Value
+innodb_encrypt_log ON
+innodb_encrypt_tables ON
+innodb_encrypt_temporary_tables OFF
+innodb_encryption_rotate_key_age 1
+innodb_encryption_rotation_iops 100
+innodb_encryption_threads 1
+SET GLOBAL innodb_encrypt_tables = ON;
+CREATE TABLE t1(f1 BIGINT PRIMARY KEY, f2 int not null,
+f3 int not null, index(f1), index idx_1(f2),
+index(f2, f3)) ENGINE=InnoDB;
+# Wait max 10 min for key encryption threads to encrypt all spaces
+SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION <> 0;
+NAME
+innodb_system
+mysql/innodb_index_stats
+mysql/innodb_table_stats
+mysql/transaction_registry
+test/t1
+CREATE TABLE t2 (f1 int not null)engine=innodb;
+# restart: --debug=d,ib_log_checkpoint_avoid
+SET GLOBAL innodb_log_checkpoint_now=TRUE;
+connect con1,localhost,root,,,;
+begin;
+insert into t2 values(1);
+connection default;
+set global innodb_encrypt_tables = OFF;
+# Wait max 10 min for key encryption threads to decrypt all spaces
+SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION = 0;
+NAME
+innodb_system
+mysql/innodb_index_stats
+mysql/innodb_table_stats
+mysql/transaction_registry
+test/t1
+test/t2
+alter table t1 drop index idx_1;
+set global innodb_encrypt_tables = ON;
+# Wait max 10 min for key encryption threads to encrypt all spaces
+SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION <> 0;
+NAME
+innodb_system
+mysql/innodb_index_stats
+mysql/innodb_table_stats
+mysql/transaction_registry
+test/t1
+test/t2
+disconnect con1;
+# restart: --debug=d,ib_log_checkpoint_avoid
+SET GLOBAL innodb_log_checkpoint_now=TRUE;
+drop table t1, t2;
+CREATE TABLE t1(f1 BIGINT PRIMARY KEY, f2 int not null,
+f3 int not null, index(f1), index idx_1(f2),
+index(f2, f3)) ENGINE=InnoDB;
+# Wait max 10 min for key encryption threads to encrypt all spaces
+SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION <> 0;
+NAME
+innodb_system
+mysql/innodb_index_stats
+mysql/innodb_table_stats
+mysql/transaction_registry
+test/t1
+CREATE TABLE t2 (f1 int not null)engine=innodb;
+# restart: --debug=d,ib_log_checkpoint_avoid
+SET GLOBAL innodb_log_checkpoint_now=TRUE;
+connect con1,localhost,root,,,;
+begin;
+insert into t2 values(1);
+connection default;
+set global innodb_encrypt_tables = OFF;
+# Wait max 10 min for key encryption threads to decrypt all spaces
+SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION = 0;
+NAME
+innodb_system
+mysql/innodb_index_stats
+mysql/innodb_table_stats
+mysql/transaction_registry
+test/t1
+test/t2
+alter table t1 drop index idx_1;
+disconnect con1;
+# restart: --debug=d,ib_log_checkpoint_avoid
+SET GLOBAL innodb_log_checkpoint_now=TRUE;
+connect con1,localhost,root,,,;
+begin;
+insert into t2 values(1);
+connection default;
+set global innodb_encrypt_tables = ON;
+# Wait max 10 min for key encryption threads to encrypt all spaces
+SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION <> 0;
+NAME
+innodb_system
+mysql/innodb_index_stats
+mysql/innodb_table_stats
+mysql/transaction_registry
+test/t1
+test/t2
+disconnect con1;
+drop table t2, t1;
diff --git a/mysql-test/suite/encryption/r/tempfiles_encrypted.result b/mysql-test/suite/encryption/r/tempfiles_encrypted.result
index 14c754a76bc..43dcbbc2d70 100644
--- a/mysql-test/suite/encryption/r/tempfiles_encrypted.result
+++ b/mysql-test/suite/encryption/r/tempfiles_encrypted.result
@@ -3872,6 +3872,32 @@ NULL
DROP VIEW v1;
DROP TABLE t1,t2;
#
+# MDEV-25032 Window functions without column references get removed from ORDER BY
+#
+create table t1 (id int, score double);
+insert into t1 values
+(1, 5),
+(1, 6),
+(1, 6),
+(1, 6),
+(1, 7),
+(1, 8.1),
+(1, 9),
+(1, 10);
+select id, row_number() over () rn
+from t1
+order by rn desc;
+id rn
+1 8
+1 7
+1 6
+1 5
+1 4
+1 3
+1 2
+1 1
+drop table t1;
+#
# End of 10.2 tests
#
#
diff --git a/mysql-test/suite/encryption/t/innodb-discard-import-change.combinations b/mysql-test/suite/encryption/t/innodb-discard-import-change.combinations
new file mode 100644
index 00000000000..729380593f3
--- /dev/null
+++ b/mysql-test/suite/encryption/t/innodb-discard-import-change.combinations
@@ -0,0 +1,5 @@
+[strict_crc32]
+--innodb-checksum-algorithm=strict_crc32
+
+[strict_full_crc32]
+--innodb-checksum-algorithm=strict_full_crc32
diff --git a/mysql-test/suite/encryption/t/innodb-discard-import.combinations b/mysql-test/suite/encryption/t/innodb-discard-import.combinations
new file mode 100644
index 00000000000..729380593f3
--- /dev/null
+++ b/mysql-test/suite/encryption/t/innodb-discard-import.combinations
@@ -0,0 +1,5 @@
+[strict_crc32]
+--innodb-checksum-algorithm=strict_crc32
+
+[strict_full_crc32]
+--innodb-checksum-algorithm=strict_full_crc32
diff --git a/mysql-test/suite/encryption/t/innodb_encrypt_freed.opt b/mysql-test/suite/encryption/t/innodb_encrypt_freed.opt
new file mode 100644
index 00000000000..f6f932c680d
--- /dev/null
+++ b/mysql-test/suite/encryption/t/innodb_encrypt_freed.opt
@@ -0,0 +1,5 @@
+--innodb-encrypt-tables
+--innodb-encrypt-log
+--innodb-encryption-threads=1
+--innodb-tablespaces-encryption
+--innodb-log-optimize-ddl=OFF
diff --git a/mysql-test/suite/encryption/t/innodb_encrypt_freed.test b/mysql-test/suite/encryption/t/innodb_encrypt_freed.test
new file mode 100644
index 00000000000..951fc93eef5
--- /dev/null
+++ b/mysql-test/suite/encryption/t/innodb_encrypt_freed.test
@@ -0,0 +1,121 @@
+--source include/have_innodb.inc
+--source include/have_example_key_management_plugin.inc
+--source include/have_debug.inc
+--source include/not_embedded.inc
+
+SHOW VARIABLES LIKE 'innodb_encrypt%';
+
+SET GLOBAL innodb_encrypt_tables = ON;
+
+CREATE TABLE t1(f1 BIGINT PRIMARY KEY, f2 int not null,
+ f3 int not null, index(f1), index idx_1(f2),
+ index(f2, f3)) ENGINE=InnoDB;
+--let $tables_count= `select count(*) + 1 from information_schema.tables where engine = 'InnoDB'`
+
+--echo # Wait max 10 min for key encryption threads to encrypt all spaces
+--let $wait_timeout= 600
+--let $wait_condition=SELECT COUNT(*) >= $tables_count FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION <> 0;
+--source include/wait_condition.inc
+--sorted_result
+SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION <> 0;
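+# (The wait above polls I_S.INNODB_TABLESPACES_ENCRYPTION until at least
+# $tables_count tablespaces report a non-zero MIN_KEY_VERSION, i.e. have
+# been encrypted by the background encryption threads; the SELECT then
+# records which tablespaces those are. The same pattern is repeated below
+# for the decryption direction with MIN_KEY_VERSION = 0.)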
+
+CREATE TABLE t2 (f1 int not null)engine=innodb;
+let $restart_parameters="--debug=d,ib_log_checkpoint_avoid";
+--source include/restart_mysqld.inc
+SET GLOBAL innodb_log_checkpoint_now=TRUE;
+
+# Stop the purge
+connect(con1,localhost,root,,,);
+begin;
+insert into t2 values(1);
+
+connection default;
+
+--let $tables_count= `select count(*) + 1 from information_schema.tables where engine = 'InnoDB'`
+set global innodb_encrypt_tables = OFF;
+
+--echo # Wait max 10 min for key encryption threads to decrypt all spaces
+--let $wait_timeout= 600
+--let $wait_condition=SELECT COUNT(*) >= $tables_count FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION = 0;
+--source include/wait_condition.inc
+--sorted_result
+SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION = 0;
+
+# Free the index `idx_1`
+alter table t1 drop index idx_1;
+
+set global innodb_encrypt_tables = ON;
+
+--echo # Wait max 10 min for key encryption threads to encrypt all spaces
+--let $wait_timeout= 600
+--let $wait_condition=SELECT COUNT(*) >= $tables_count FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION <> 0;
+--source include/wait_condition.inc
+--sorted_result
+SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION <> 0;
+
+disconnect con1;
+let $shutdown_timeout=0;
+--source include/restart_mysqld.inc
+SET GLOBAL innodb_log_checkpoint_now=TRUE;
+
+drop table t1, t2;
+
+#
+#
+CREATE TABLE t1(f1 BIGINT PRIMARY KEY, f2 int not null,
+ f3 int not null, index(f1), index idx_1(f2),
+ index(f2, f3)) ENGINE=InnoDB;
+--let $tables_count= `select count(*) + 1 from information_schema.tables where engine = 'InnoDB'`
+
+--echo # Wait max 10 min for key encryption threads to encrypt all spaces
+--let $wait_timeout= 600
+--let $wait_condition=SELECT COUNT(*) >= $tables_count FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION <> 0;
+--source include/wait_condition.inc
+--sorted_result
+SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION <> 0;
+
+CREATE TABLE t2 (f1 int not null)engine=innodb;
+--source include/restart_mysqld.inc
+SET GLOBAL innodb_log_checkpoint_now=TRUE;
+
+# Stop the purge
+connect(con1,localhost,root,,,);
+begin;
+insert into t2 values(1);
+
+connection default;
+
+--let $tables_count= `select count(*) + 1 from information_schema.tables where engine = 'InnoDB'`
+set global innodb_encrypt_tables = OFF;
+
+--echo # Wait max 10 min for key encryption threads to decrypt all spaces
+--let $wait_timeout= 600
+--let $wait_condition=SELECT COUNT(*) >= $tables_count FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION = 0;
+--source include/wait_condition.inc
+--sorted_result
+SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION = 0;
+
+# Free the index `idx_1`
+alter table t1 drop index idx_1;
+
+disconnect con1;
+--source include/restart_mysqld.inc
+SET GLOBAL innodb_log_checkpoint_now=TRUE;
+
+# Stop the purge
+connect(con1,localhost,root,,,);
+begin;
+insert into t2 values(1);
+
+connection default;
+set global innodb_encrypt_tables = ON;
+
+--echo # Wait max 10 min for key encryption threads to encrypt all spaces
+--let $wait_timeout= 600
+--let $wait_condition=SELECT COUNT(*) >= $tables_count FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION <> 0;
+--source include/wait_condition.inc
+--sorted_result
+SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION <> 0;
+
+disconnect con1;
+drop table t2, t1;
diff --git a/mysql-test/suite/encryption/t/innodb_encryption.test b/mysql-test/suite/encryption/t/innodb_encryption.test
index a1abfb51462..1c8d200458a 100644
--- a/mysql-test/suite/encryption/t/innodb_encryption.test
+++ b/mysql-test/suite/encryption/t/innodb_encryption.test
@@ -14,7 +14,7 @@ SHOW VARIABLES LIKE 'innodb_encrypt%';
SET GLOBAL innodb_encrypt_tables = ON;
---let $tables_count= `select count(*) + 1 from information_schema.tables where engine = 'InnoDB'`
+--let $tables_count= `select count(*) + @@global.innodb_undo_tablespaces + 1 from information_schema.tables where engine = 'InnoDB'`
--echo # Wait max 10 min for key encryption threads to encrypt all spaces
--let $wait_timeout= 600
diff --git a/mysql-test/suite/engines/funcs/disabled.def b/mysql-test/suite/engines/funcs/disabled.def
index c43b0c92bc9..e1e2491b4a8 100644
--- a/mysql-test/suite/engines/funcs/disabled.def
+++ b/mysql-test/suite/engines/funcs/disabled.def
@@ -1,97 +1,4 @@
# List of disabled tests
# test name : comment
-rpl_redirect : Fails due to bug#49978
-crash_manycolumns_string : Bug#50495 'Row size too large' for plugin, but works for built-in innodb
-ix_unique_lob : Bug#52283 Innodb reports extra warnings when SELECT/WHERE is performed using invalid value
-ix_unique_string_length : Bug#52283 Innodb reports extra warnings when SELECT/WHERE is performed using invalid value
-rpl_empty_master_crash : Test not applicable in 5.5+
-rpl_REDIRECT : Test not applicable in 5.5+
-rpl_create_database : Result Difference Due To Change In .inc file
-rpl_loaddata_m : Result Difference Due To Change In .inc file
-rpl_sp_effects : Result Difference Due To Change In .inc file
-rpl_variables : Result Difference Due To Change In .inc file
-rpl_loaddata_s : Result Difference Due To Change In .inc file
-rpl_dual_pos_advance : Result Difference Due To Change In .inc file
-rpl000011 : Result Difference Due To Change In .inc file
-rpl000013 : Result Difference Due To Change In .inc file
-rpl_000015 : Result Difference Due To Change In .inc file
-rpl_LD_INFILE : Result Difference Due To Change In .inc file
-rpl_alter : Result Difference Due To Change In .inc file
-rpl_alter_db : Result Difference Due To Change In .inc file
-rpl_alter_db : Result Difference Due To Change In .inc file
-rpl_bit : Result Difference Due To Change In .inc file
-rpl_bit_npk : Result Difference Due To Change In .inc file
-rpl_change_master : Result Difference Due To Change In .inc file
-rpl_do_grant : Result Difference Due To Change In .inc file
-rpl_drop : Result Difference Due To Change In .inc file
-rpl_drop_db : Result Difference Due To Change In .inc file
-rpl_flushlog_loop : Result Difference Due To Change In .inc file
-rpl_get_lock : Result Difference Due To Change In .inc file
-rpl_get_lock : Result Difference Due To Change In .inc file
-rpl_insert : Result Difference Due To Change In .inc file
-rpl_insert_select : Result Difference Due To Change In .inc file
-rpl_loaddata2 : Result Difference Due To Change In .inc file
-rpl_loaddatalocal : Result Difference Due To Change In .inc file
-rpl_loadfile : Result Difference Due To Change In .inc file
-rpl_log_pos : Result Difference Due To Change In .inc file
-rpl_many_optimize : Result Difference Due To Change In .inc file
-rpl_master_pos_wait : Result Difference Due To Change In .inc file
-rpl_misc_functions : Result Difference Due To Change In .inc file
-rpl_ps : Result Difference Due To Change In .inc file
-rpl_server_id1 : Result Difference Due To Change In .inc file
-rpl_session_var : Result Difference Due To Change In .inc file
-rpl_sf : Result Difference Due To Change In .inc file
-rpl_slave_status : Result Difference Due To Change In .inc file
-rpl_sp004 : Result Difference Due To Change In .inc file
-rpl_start_stop_slave : Result Difference Due To Change In .inc file
-rpl_stm_max_relay_size : Result Difference Due To Change In .inc file
-rpl_stm_mystery22 : Result Difference Due To Change In .inc file
-rpl_stm_no_op : Result Difference Due To Change In .inc file
-rpl_stm_reset_slave : Result Difference Due To Change In .inc file
-rpl_temp_table : Result Difference Due To Change In .inc file
-rpl_temporary : Result Difference Due To Change In .inc file
-rpl_trigger : Result Difference Due To Change In .inc file
-rpl_trunc_temp : Result Difference Due To Change In .inc file
-rpl_user_variables : Result Difference Due To Change In .inc file
-rpl_server_id2 : Result Difference Due To Change In .inc file
-rpl000010 : Result Difference Due To Change In .inc file
-rpl_init_slave : Result Difference Due To Change In .inc file
-rpl_multi_delete2 : Result Difference Due To Change In .inc file
-rpl_view : Result Difference Due To Change In .inc file
-rpl_ignore_table_update : Result Difference Due To Change In .inc file
-rpl_err_ignoredtable : Result Difference Due To Change In .inc file
-rpl_multi_update4 : Result Difference Due To Change In .inc file
-rpl_multi_delete : Result Difference Due To Change In .inc file
-rpl_ignore_grant : Result Difference Due To Change In .inc file
-rpl_ignore_revoke : Result Difference Due To Change In .inc file
-rpl_free_items : Result Difference Due To Change In .inc file
-rpl_replicate_ignore_db : Result Difference Due To Change In .inc file
-rpl000017 : Result Difference Due To Change In .inc file
-rpl_skip_error : Result Difference Due To Change In .inc file
-
-rpl_loaddata_s : Test Present in rpl suite as well . Test Fails due to bin log truncation.
-rpl_log_pos : Test Present in rpl suite as well . Test Fails due to bin log truncation.
-rpl_row_NOW : Result Difference Due To Change In .inc file
-rpl_row_USER : Result Difference Due To Change In .inc file
-rpl_row_drop : Result Difference Due To Change In .inc file
-rpl_row_func001 : Result Difference Due To Change In .inc file
-rpl_row_max_relay_size : Result Difference Due To Change In .inc file
-rpl_row_reset_slave : Result Difference Due To Change In .inc file
-rpl_row_sp001 : Result Difference Due To Change In .inc file
-rpl_row_sp005 : Result Difference Due To Change In .inc file
-rpl_row_sp008 : Result Difference Due To Change In .inc file
-rpl_row_sp009 : Result Difference Due To Change In .inc file
-rpl_row_sp010 : Result Difference Due To Change In .inc file
-rpl_row_sp011 : Result Difference Due To Change In .inc file
-rpl_row_sp012 : Result Difference Due To Change In .inc file
-rpl_row_stop_middle : Result Difference Due To Change In .inc file
-rpl_row_trig001 : Result Difference Due To Change In .inc file
-rpl_row_trig002 : Result Difference Due To Change In .inc file
-rpl_row_trig003 : Result Difference Due To Change In .inc file
-rpl_row_view01 : Result Difference Due To Change In .inc file
-rpl_switch_stm_row_mixed : Result Difference Due To Change In .inc file
-rpl_relayspace : Result Difference Due To Change In .inc file
-rpl_row_inexist_tbl : Result Difference Due To Change In .inc file
-rpl_sp : Result Difference Due To Change In .inc file
-rpl_rbr_to_sbr : Result Difference Due To Change In .inc file
+crash_manycolumns_string : Bug#50495 'Row size too large' for plugin, but works for built-in innodb [ERROR] InnoDB: Cannot add field `c726` in table `test`.`t1` because after adding it, the row size is 8135 which is greater than maximum allowed size (8126 bytes) for a record on index leaf page
diff --git a/mysql-test/suite/engines/funcs/r/crash_manycolumns_string.result b/mysql-test/suite/engines/funcs/r/crash_manycolumns_string.result
index 47079e34b38..6a2acb7ec86 100644
--- a/mysql-test/suite/engines/funcs/r/crash_manycolumns_string.result
+++ b/mysql-test/suite/engines/funcs/r/crash_manycolumns_string.result
@@ -1,5 +1,8 @@
-DROP TABLE IF EXISTS t1;
+SET SESSION innodb_strict_mode=off;
+call mtr.add_suppression("Row size too large (> 8126)*");
CREATE TABLE t1 (c1 VARCHAR(10),c2 VARCHAR(10),c3 VARCHAR(10),c4 VARCHAR(10),c5 VARCHAR(10),c6 VARCHAR(10),c7 VARCHAR(10),c8 VARCHAR(10),c9 VARCHAR(10),c10 VARCHAR(10),c11 VARCHAR(10),c12 VARCHAR(10),c13 VARCHAR(10),c14 VARCHAR(10),c15 VARCHAR(10),c16 VARCHAR(10),c17 VARCHAR(10),c18 VARCHAR(10),c19 VARCHAR(10),c20 VARCHAR(10),c21 VARCHAR(10),c22 VARCHAR(10),c23 VARCHAR(10),c24 VARCHAR(10),c25 VARCHAR(10),c26 VARCHAR(10),c27 VARCHAR(10),c28 VARCHAR(10),c29 VARCHAR(10),c30 VARCHAR(10),c31 VARCHAR(10),c32 VARCHAR(10),c33 VARCHAR(10),c34 VARCHAR(10),c35 VARCHAR(10),c36 VARCHAR(10),c37 VARCHAR(10),c38 VARCHAR(10),c39 VARCHAR(10),c40 VARCHAR(10),c41 VARCHAR(10),c42 VARCHAR(10),c43 VARCHAR(10),c44 VARCHAR(10),c45 VARCHAR(10),c46 VARCHAR(10),c47 VARCHAR(10),c48 VARCHAR(10),c49 VARCHAR(10),c50 VARCHAR(10),c51 VARCHAR(10),c52 VARCHAR(10),c53 VARCHAR(10),c54 VARCHAR(10),c55 VARCHAR(10),c56 VARCHAR(10),c57 VARCHAR(10),c58 VARCHAR(10),c59 VARCHAR(10),c60 VARCHAR(10),c61 VARCHAR(10),c62 VARCHAR(10),c63 VARCHAR(10),c64 VARCHAR(10),c65 VARCHAR(10),c66 VARCHAR(10),c67 VARCHAR(10),c68 VARCHAR(10),c69 VARCHAR(10),c70 VARCHAR(10),c71 VARCHAR(10),c72 VARCHAR(10),c73 VARCHAR(10),c74 VARCHAR(10),c75 VARCHAR(10),c76 VARCHAR(10),c77 VARCHAR(10),c78 VARCHAR(10),c79 VARCHAR(10),c80 VARCHAR(10),c81 VARCHAR(10),c82 VARCHAR(10),c83 VARCHAR(10),c84 VARCHAR(10),c85 VARCHAR(10),c86 VARCHAR(10),c87 VARCHAR(10),c88 VARCHAR(10),c89 VARCHAR(10),c90 VARCHAR(10),c91 VARCHAR(10),c92 VARCHAR(10),c93 VARCHAR(10),c94 VARCHAR(10),c95 VARCHAR(10),c96 VARCHAR(10),c97 VARCHAR(10),c98 VARCHAR(10),c99 VARCHAR(10),c100 VARCHAR(10),c101 VARCHAR(10),c102 VARCHAR(10),c103 VARCHAR(10),c104 VARCHAR(10),c105 VARCHAR(10),c106 VARCHAR(10),c107 VARCHAR(10),c108 VARCHAR(10),c109 VARCHAR(10),c110 VARCHAR(10),c111 VARCHAR(10),c112 VARCHAR(10),c113 VARCHAR(10),c114 VARCHAR(10),c115 VARCHAR(10),c116 VARCHAR(10),c117 VARCHAR(10),c118 VARCHAR(10),c119 VARCHAR(10),c120 VARCHAR(10),c121 VARCHAR(10),c122 VARCHAR(10),c123 VARCHAR(10),c124 VARCHAR(10),c125 VARCHAR(10),c126 VARCHAR(10),c127 VARCHAR(10),c128 VARCHAR(10),c129 VARCHAR(10),c130 VARCHAR(10),c131 VARCHAR(10),c132 VARCHAR(10),c133 VARCHAR(10),c134 VARCHAR(10),c135 VARCHAR(10),c136 VARCHAR(10),c137 VARCHAR(10),c138 VARCHAR(10),c139 VARCHAR(10),c140 VARCHAR(10),c141 VARCHAR(10),c142 VARCHAR(10),c143 VARCHAR(10),c144 VARCHAR(10),c145 VARCHAR(10),c146 VARCHAR(10),c147 VARCHAR(10),c148 VARCHAR(10),c149 VARCHAR(10),c150 VARCHAR(10),c151 VARCHAR(10),c152 VARCHAR(10),c153 VARCHAR(10),c154 VARCHAR(10),c155 VARCHAR(10),c156 VARCHAR(10),c157 VARCHAR(10),c158 VARCHAR(10),c159 VARCHAR(10),c160 VARCHAR(10),c161 VARCHAR(10),c162 VARCHAR(10),c163 VARCHAR(10),c164 VARCHAR(10),c165 VARCHAR(10),c166 VARCHAR(10),c167 VARCHAR(10),c168 VARCHAR(10),c169 VARCHAR(10),c170 VARCHAR(10),c171 VARCHAR(10),c172 VARCHAR(10),c173 VARCHAR(10),c174 VARCHAR(10),c175 VARCHAR(10),c176 VARCHAR(10),c177 VARCHAR(10),c178 VARCHAR(10),c179 VARCHAR(10),c180 VARCHAR(10),c181 VARCHAR(10),c182 VARCHAR(10),c183 VARCHAR(10),c184 VARCHAR(10),c185 VARCHAR(10),c186 VARCHAR(10),c187 VARCHAR(10),c188 VARCHAR(10),c189 VARCHAR(10),c190 VARCHAR(10),c191 VARCHAR(10),c192 VARCHAR(10),c193 VARCHAR(10),c194 VARCHAR(10),c195 VARCHAR(10),c196 VARCHAR(10),c197 VARCHAR(10),c198 VARCHAR(10),c199 VARCHAR(10),c200 VARCHAR(10),c201 VARCHAR(10),c202 VARCHAR(10),c203 VARCHAR(10),c204 VARCHAR(10),c205 VARCHAR(10),c206 VARCHAR(10),c207 VARCHAR(10),c208 VARCHAR(10),c209 VARCHAR(10),c210 VARCHAR(10),c211 VARCHAR(10),c212 VARCHAR(10),c213 VARCHAR(10),c214 VARCHAR(10),c215 
VARCHAR(10),c216 VARCHAR(10),c217 VARCHAR(10),c218 VARCHAR(10),c219 VARCHAR(10),c220 VARCHAR(10),c221 VARCHAR(10),c222 VARCHAR(10),c223 VARCHAR(10),c224 VARCHAR(10),c225 VARCHAR(10),c226 VARCHAR(10),c227 VARCHAR(10),c228 VARCHAR(10),c229 VARCHAR(10),c230 VARCHAR(10),c231 VARCHAR(10),c232 VARCHAR(10),c233 VARCHAR(10),c234 VARCHAR(10),c235 VARCHAR(10),c236 VARCHAR(10),c237 VARCHAR(10),c238 VARCHAR(10),c239 VARCHAR(10),c240 VARCHAR(10),c241 VARCHAR(10),c242 VARCHAR(10),c243 VARCHAR(10),c244 VARCHAR(10),c245 VARCHAR(10),c246 VARCHAR(10),c247 VARCHAR(10),c248 VARCHAR(10),c249 VARCHAR(10),c250 VARCHAR(10),c251 VARCHAR(10),c252 VARCHAR(10),c253 VARCHAR(10),c254 VARCHAR(10),c255 VARCHAR(10),c256 VARCHAR(10),c257 VARCHAR(10),c258 VARCHAR(10),c259 VARCHAR(10),c260 VARCHAR(10),c261 VARCHAR(10),c262 VARCHAR(10),c263 VARCHAR(10),c264 VARCHAR(10),c265 VARCHAR(10),c266 VARCHAR(10),c267 VARCHAR(10),c268 VARCHAR(10),c269 VARCHAR(10),c270 VARCHAR(10),c271 VARCHAR(10),c272 VARCHAR(10),c273 VARCHAR(10),c274 VARCHAR(10),c275 VARCHAR(10),c276 VARCHAR(10),c277 VARCHAR(10),c278 VARCHAR(10),c279 VARCHAR(10),c280 VARCHAR(10),c281 VARCHAR(10),c282 VARCHAR(10),c283 VARCHAR(10),c284 VARCHAR(10),c285 VARCHAR(10),c286 VARCHAR(10),c287 VARCHAR(10),c288 VARCHAR(10),c289 VARCHAR(10),c290 VARCHAR(10),c291 VARCHAR(10),c292 VARCHAR(10),c293 VARCHAR(10),c294 VARCHAR(10),c295 VARCHAR(10),c296 VARCHAR(10),c297 VARCHAR(10),c298 VARCHAR(10),c299 VARCHAR(10),c300 VARCHAR(10),c301 VARCHAR(10),c302 VARCHAR(10),c303 VARCHAR(10),c304 VARCHAR(10),c305 VARCHAR(10),c306 VARCHAR(10),c307 VARCHAR(10),c308 VARCHAR(10),c309 VARCHAR(10),c310 VARCHAR(10),c311 VARCHAR(10),c312 VARCHAR(10),c313 VARCHAR(10),c314 VARCHAR(10),c315 VARCHAR(10),c316 VARCHAR(10),c317 VARCHAR(10),c318 VARCHAR(10),c319 VARCHAR(10),c320 VARCHAR(10),c321 VARCHAR(10),c322 VARCHAR(10),c323 VARCHAR(10),c324 VARCHAR(10),c325 VARCHAR(10),c326 VARCHAR(10),c327 VARCHAR(10),c328 VARCHAR(10),c329 VARCHAR(10),c330 VARCHAR(10),c331 VARCHAR(10),c332 VARCHAR(10),c333 VARCHAR(10),c334 VARCHAR(10),c335 VARCHAR(10),c336 VARCHAR(10),c337 VARCHAR(10),c338 VARCHAR(10),c339 VARCHAR(10),c340 VARCHAR(10),c341 VARCHAR(10),c342 VARCHAR(10),c343 VARCHAR(10),c344 VARCHAR(10),c345 VARCHAR(10),c346 VARCHAR(10),c347 VARCHAR(10),c348 VARCHAR(10),c349 VARCHAR(10),c350 VARCHAR(10),c351 VARCHAR(10),c352 VARCHAR(10),c353 VARCHAR(10),c354 VARCHAR(10),c355 VARCHAR(10),c356 VARCHAR(10),c357 VARCHAR(10),c358 VARCHAR(10),c359 VARCHAR(10),c360 VARCHAR(10),c361 VARCHAR(10),c362 VARCHAR(10),c363 VARCHAR(10),c364 VARCHAR(10),c365 VARCHAR(10),c366 VARCHAR(10),c367 VARCHAR(10),c368 VARCHAR(10),c369 VARCHAR(10),c370 VARCHAR(10),c371 VARCHAR(10),c372 VARCHAR(10),c373 VARCHAR(10),c374 VARCHAR(10),c375 VARCHAR(10),c376 VARCHAR(10),c377 VARCHAR(10),c378 VARCHAR(10),c379 VARCHAR(10),c380 VARCHAR(10),c381 VARCHAR(10),c382 VARCHAR(10),c383 VARCHAR(10),c384 VARCHAR(10),c385 VARCHAR(10),c386 VARCHAR(10),c387 VARCHAR(10),c388 VARCHAR(10),c389 VARCHAR(10),c390 VARCHAR(10),c391 VARCHAR(10),c392 VARCHAR(10),c393 VARCHAR(10),c394 VARCHAR(10),c395 VARCHAR(10),c396 VARCHAR(10),c397 VARCHAR(10),c398 VARCHAR(10),c399 VARCHAR(10),c400 VARCHAR(10),c401 VARCHAR(10),c402 VARCHAR(10),c403 VARCHAR(10),c404 VARCHAR(10),c405 VARCHAR(10),c406 VARCHAR(10),c407 VARCHAR(10),c408 VARCHAR(10),c409 VARCHAR(10),c410 VARCHAR(10),c411 VARCHAR(10),c412 VARCHAR(10),c413 VARCHAR(10),c414 VARCHAR(10),c415 VARCHAR(10),c416 VARCHAR(10),c417 VARCHAR(10),c418 VARCHAR(10),c419 VARCHAR(10),c420 VARCHAR(10),c421 VARCHAR(10),c422 VARCHAR(10),c423 VARCHAR(10),c424 
VARCHAR(10),c425 VARCHAR(10),c426 VARCHAR(10),c427 VARCHAR(10),c428 VARCHAR(10),c429 VARCHAR(10),c430 VARCHAR(10),c431 VARCHAR(10),c432 VARCHAR(10),c433 VARCHAR(10),c434 VARCHAR(10),c435 VARCHAR(10),c436 VARCHAR(10),c437 VARCHAR(10),c438 VARCHAR(10),c439 VARCHAR(10),c440 VARCHAR(10),c441 VARCHAR(10),c442 VARCHAR(10),c443 VARCHAR(10),c444 VARCHAR(10),c445 VARCHAR(10),c446 VARCHAR(10),c447 VARCHAR(10),c448 VARCHAR(10),c449 VARCHAR(10),c450 VARCHAR(10),c451 VARCHAR(10),c452 VARCHAR(10),c453 VARCHAR(10),c454 VARCHAR(10),c455 VARCHAR(10),c456 VARCHAR(10),c457 VARCHAR(10),c458 VARCHAR(10),c459 VARCHAR(10),c460 VARCHAR(10),c461 VARCHAR(10),c462 VARCHAR(10),c463 VARCHAR(10),c464 VARCHAR(10),c465 VARCHAR(10),c466 VARCHAR(10),c467 VARCHAR(10),c468 VARCHAR(10),c469 VARCHAR(10),c470 VARCHAR(10),c471 VARCHAR(10),c472 VARCHAR(10),c473 VARCHAR(10),c474 VARCHAR(10),c475 VARCHAR(10),c476 VARCHAR(10),c477 VARCHAR(10),c478 VARCHAR(10),c479 VARCHAR(10),c480 VARCHAR(10),c481 VARCHAR(10),c482 VARCHAR(10),c483 VARCHAR(10),c484 VARCHAR(10),c485 VARCHAR(10),c486 VARCHAR(10),c487 VARCHAR(10),c488 VARCHAR(10),c489 VARCHAR(10),c490 VARCHAR(10),c491 VARCHAR(10),c492 VARCHAR(10),c493 VARCHAR(10),c494 VARCHAR(10),c495 VARCHAR(10),c496 VARCHAR(10),c497 VARCHAR(10),c498 VARCHAR(10),c499 VARCHAR(10),c500 VARCHAR(10),c501 VARCHAR(10),c502 VARCHAR(10),c503 VARCHAR(10),c504 VARCHAR(10),c505 VARCHAR(10),c506 VARCHAR(10),c507 VARCHAR(10),c508 VARCHAR(10),c509 VARCHAR(10),c510 VARCHAR(10),c511 VARCHAR(10),c512 VARCHAR(10),c513 VARCHAR(10),c514 VARCHAR(10),c515 VARCHAR(10),c516 VARCHAR(10),c517 VARCHAR(10),c518 VARCHAR(10),c519 VARCHAR(10),c520 VARCHAR(10),c521 VARCHAR(10),c522 VARCHAR(10),c523 VARCHAR(10),c524 VARCHAR(10),c525 VARCHAR(10),c526 VARCHAR(10),c527 VARCHAR(10),c528 VARCHAR(10),c529 VARCHAR(10),c530 VARCHAR(10),c531 VARCHAR(10),c532 VARCHAR(10),c533 VARCHAR(10),c534 VARCHAR(10),c535 VARCHAR(10),c536 VARCHAR(10),c537 VARCHAR(10),c538 VARCHAR(10),c539 VARCHAR(10),c540 VARCHAR(10),c541 VARCHAR(10),c542 VARCHAR(10),c543 VARCHAR(10),c544 VARCHAR(10),c545 VARCHAR(10),c546 VARCHAR(10),c547 VARCHAR(10),c548 VARCHAR(10),c549 VARCHAR(10),c550 VARCHAR(10),c551 VARCHAR(10),c552 VARCHAR(10),c553 VARCHAR(10),c554 VARCHAR(10),c555 VARCHAR(10),c556 VARCHAR(10),c557 VARCHAR(10),c558 VARCHAR(10),c559 VARCHAR(10),c560 VARCHAR(10),c561 VARCHAR(10),c562 VARCHAR(10),c563 VARCHAR(10),c564 VARCHAR(10),c565 VARCHAR(10),c566 VARCHAR(10),c567 VARCHAR(10),c568 VARCHAR(10),c569 VARCHAR(10),c570 VARCHAR(10),c571 VARCHAR(10),c572 VARCHAR(10),c573 VARCHAR(10),c574 VARCHAR(10),c575 VARCHAR(10),c576 VARCHAR(10),c577 VARCHAR(10),c578 VARCHAR(10),c579 VARCHAR(10),c580 VARCHAR(10),c581 VARCHAR(10),c582 VARCHAR(10),c583 VARCHAR(10),c584 VARCHAR(10),c585 VARCHAR(10),c586 VARCHAR(10),c587 VARCHAR(10),c588 VARCHAR(10),c589 VARCHAR(10),c590 VARCHAR(10),c591 VARCHAR(10),c592 VARCHAR(10),c593 VARCHAR(10),c594 VARCHAR(10),c595 VARCHAR(10),c596 VARCHAR(10),c597 VARCHAR(10),c598 VARCHAR(10),c599 VARCHAR(10),c600 VARCHAR(10),c601 VARCHAR(10),c602 VARCHAR(10),c603 VARCHAR(10),c604 VARCHAR(10),c605 VARCHAR(10),c606 VARCHAR(10),c607 VARCHAR(10),c608 VARCHAR(10),c609 VARCHAR(10),c610 VARCHAR(10),c611 VARCHAR(10),c612 VARCHAR(10),c613 VARCHAR(10),c614 VARCHAR(10),c615 VARCHAR(10),c616 VARCHAR(10),c617 VARCHAR(10),c618 VARCHAR(10),c619 VARCHAR(10),c620 VARCHAR(10),c621 VARCHAR(10),c622 VARCHAR(10),c623 VARCHAR(10),c624 VARCHAR(10),c625 VARCHAR(10),c626 VARCHAR(10),c627 VARCHAR(10),c628 VARCHAR(10),c629 VARCHAR(10),c630 VARCHAR(10),c631 VARCHAR(10),c632 VARCHAR(10),c633 
VARCHAR(10),c634 VARCHAR(10),c635 VARCHAR(10),c636 VARCHAR(10),c637 VARCHAR(10),c638 VARCHAR(10),c639 VARCHAR(10),c640 VARCHAR(10),c641 VARCHAR(10),c642 VARCHAR(10),c643 VARCHAR(10),c644 VARCHAR(10),c645 VARCHAR(10),c646 VARCHAR(10),c647 VARCHAR(10),c648 VARCHAR(10),c649 VARCHAR(10),c650 VARCHAR(10),c651 VARCHAR(10),c652 VARCHAR(10),c653 VARCHAR(10),c654 VARCHAR(10),c655 VARCHAR(10),c656 VARCHAR(10),c657 VARCHAR(10),c658 VARCHAR(10),c659 VARCHAR(10),c660 VARCHAR(10),c661 VARCHAR(10),c662 VARCHAR(10),c663 VARCHAR(10),c664 VARCHAR(10),c665 VARCHAR(10),c666 VARCHAR(10),c667 VARCHAR(10),c668 VARCHAR(10),c669 VARCHAR(10),c670 VARCHAR(10),c671 VARCHAR(10),c672 VARCHAR(10),c673 VARCHAR(10),c674 VARCHAR(10),c675 VARCHAR(10),c676 VARCHAR(10),c677 VARCHAR(10),c678 VARCHAR(10),c679 VARCHAR(10),c680 VARCHAR(10),c681 VARCHAR(10),c682 VARCHAR(10),c683 VARCHAR(10),c684 VARCHAR(10),c685 VARCHAR(10),c686 VARCHAR(10),c687 VARCHAR(10),c688 VARCHAR(10),c689 VARCHAR(10),c690 VARCHAR(10),c691 VARCHAR(10),c692 VARCHAR(10),c693 VARCHAR(10),c694 VARCHAR(10),c695 VARCHAR(10),c696 VARCHAR(10),c697 VARCHAR(10),c698 VARCHAR(10),c699 VARCHAR(10),c700 VARCHAR(10),c701 VARCHAR(10),c702 VARCHAR(10),c703 VARCHAR(10),c704 VARCHAR(10),c705 VARCHAR(10),c706 VARCHAR(10),c707 VARCHAR(10),c708 VARCHAR(10),c709 VARCHAR(10),c710 VARCHAR(10),c711 VARCHAR(10),c712 VARCHAR(10),c713 VARCHAR(10),c714 VARCHAR(10),c715 VARCHAR(10),c716 VARCHAR(10),c717 VARCHAR(10),c718 VARCHAR(10),c719 VARCHAR(10),c720 VARCHAR(10),c721 VARCHAR(10),c722 VARCHAR(10),c723 VARCHAR(10),c724 VARCHAR(10),c725 VARCHAR(10),c726 VARCHAR(10),c727 VARCHAR(10),c728 VARCHAR(10),c729 VARCHAR(10),c730 VARCHAR(10),c731 VARCHAR(10),c732 VARCHAR(10),c733 VARCHAR(10),c734 VARCHAR(10),c735 VARCHAR(10),c736 VARCHAR(10),c737 VARCHAR(10),c738 VARCHAR(10),c739 VARCHAR(10),c740 VARCHAR(10),c741 VARCHAR(10),c742 VARCHAR(10),c743 VARCHAR(10),c744 VARCHAR(10),c745 VARCHAR(10),c746 VARCHAR(10),c747 VARCHAR(10),c748 VARCHAR(10),c749 VARCHAR(10),c750 VARCHAR(10),c751 VARCHAR(10),c752 VARCHAR(10),c753 VARCHAR(10),c754 VARCHAR(10),c755 VARCHAR(10),c756 VARCHAR(10),c757 VARCHAR(10),c758 VARCHAR(10),c759 VARCHAR(10),c760 VARCHAR(10),c761 VARCHAR(10),c762 VARCHAR(10),c763 VARCHAR(10),c764 VARCHAR(10),c765 VARCHAR(10),c766 VARCHAR(10),c767 VARCHAR(10),c768 VARCHAR(10),c769 VARCHAR(10),c770 VARCHAR(10),c771 VARCHAR(10),c772 VARCHAR(10),c773 VARCHAR(10),c774 VARCHAR(10),c775 VARCHAR(10),c776 VARCHAR(10),c777 VARCHAR(10),c778 VARCHAR(10),c779 VARCHAR(10),c780 VARCHAR(10),c781 VARCHAR(10),c782 VARCHAR(10),c783 VARCHAR(10),c784 VARCHAR(10),c785 VARCHAR(10),c786 VARCHAR(10),c787 VARCHAR(10),c788 VARCHAR(10),c789 VARCHAR(10),c790 VARCHAR(10),c791 VARCHAR(10),c792 VARCHAR(10),c793 VARCHAR(10),c794 VARCHAR(10),c795 VARCHAR(10),c796 VARCHAR(10),c797 VARCHAR(10),c798 VARCHAR(10),c799 VARCHAR(10),c800 VARCHAR(10),c801 VARCHAR(10),c802 VARCHAR(10),c803 VARCHAR(10),c804 VARCHAR(10),c805 VARCHAR(10),c806 VARCHAR(10),c807 VARCHAR(10),c808 VARCHAR(10),c809 VARCHAR(10),c810 VARCHAR(10),c811 VARCHAR(10),c812 VARCHAR(10),c813 VARCHAR(10),c814 VARCHAR(10),c815 VARCHAR(10),c816 VARCHAR(10),c817 VARCHAR(10),c818 VARCHAR(10),c819 VARCHAR(10),c820 VARCHAR(10),c821 VARCHAR(10),c822 VARCHAR(10),c823 VARCHAR(10),c824 VARCHAR(10),c825 VARCHAR(10),c826 VARCHAR(10),c827 VARCHAR(10),c828 VARCHAR(10),c829 VARCHAR(10),c830 VARCHAR(10),c831 VARCHAR(10),c832 VARCHAR(10),c833 VARCHAR(10),c834 VARCHAR(10),c835 VARCHAR(10),c836 VARCHAR(10),c837 VARCHAR(10),c838 VARCHAR(10),c839 VARCHAR(10),c840 VARCHAR(10),c841 VARCHAR(10),c842 
VARCHAR(10),c843 VARCHAR(10),c844 VARCHAR(10),c845 VARCHAR(10),c846 VARCHAR(10),c847 VARCHAR(10),c848 VARCHAR(10),c849 VARCHAR(10),c850 VARCHAR(10),c851 VARCHAR(10),c852 VARCHAR(10),c853 VARCHAR(10),c854 VARCHAR(10),c855 VARCHAR(10),c856 VARCHAR(10),c857 VARCHAR(10),c858 VARCHAR(10),c859 VARCHAR(10),c860 VARCHAR(10),c861 VARCHAR(10),c862 VARCHAR(10),c863 VARCHAR(10),c864 VARCHAR(10),c865 VARCHAR(10),c866 VARCHAR(10),c867 VARCHAR(10),c868 VARCHAR(10),c869 VARCHAR(10),c870 VARCHAR(10),c871 VARCHAR(10),c872 VARCHAR(10),c873 VARCHAR(10),c874 VARCHAR(10),c875 VARCHAR(10),c876 VARCHAR(10),c877 VARCHAR(10),c878 VARCHAR(10),c879 VARCHAR(10),c880 VARCHAR(10),c881 VARCHAR(10),c882 VARCHAR(10),c883 VARCHAR(10),c884 VARCHAR(10),c885 VARCHAR(10),c886 VARCHAR(10),c887 VARCHAR(10),c888 VARCHAR(10),c889 VARCHAR(10),c890 VARCHAR(10),c891 VARCHAR(10),c892 VARCHAR(10),c893 VARCHAR(10),c894 VARCHAR(10),c895 VARCHAR(10),c896 VARCHAR(10),c897 VARCHAR(10),c898 VARCHAR(10),c899 VARCHAR(10),c900 VARCHAR(10),c901 VARCHAR(10),c902 VARCHAR(10),c903 VARCHAR(10),c904 VARCHAR(10),c905 VARCHAR(10),c906 VARCHAR(10),c907 VARCHAR(10),c908 VARCHAR(10),c909 VARCHAR(10),c910 VARCHAR(10),c911 VARCHAR(10),c912 VARCHAR(10),c913 VARCHAR(10),c914 VARCHAR(10),c915 VARCHAR(10),c916 VARCHAR(10),c917 VARCHAR(10),c918 VARCHAR(10),c919 VARCHAR(10),c920 VARCHAR(10),c921 VARCHAR(10),c922 VARCHAR(10),c923 VARCHAR(10),c924 VARCHAR(10),c925 VARCHAR(10),c926 VARCHAR(10),c927 VARCHAR(10),c928 VARCHAR(10),c929 VARCHAR(10),c930 VARCHAR(10),c931 VARCHAR(10),c932 VARCHAR(10),c933 VARCHAR(10),c934 VARCHAR(10),c935 VARCHAR(10),c936 VARCHAR(10),c937 VARCHAR(10),c938 VARCHAR(10),c939 VARCHAR(10),c940 VARCHAR(10),c941 VARCHAR(10),c942 VARCHAR(10),c943 VARCHAR(10),c944 VARCHAR(10),c945 VARCHAR(10),c946 VARCHAR(10),c947 VARCHAR(10),c948 VARCHAR(10),c949 VARCHAR(10),c950 VARCHAR(10),c951 VARCHAR(10),c952 VARCHAR(10),c953 VARCHAR(10),c954 VARCHAR(10),c955 VARCHAR(10),c956 VARCHAR(10),c957 VARCHAR(10),c958 VARCHAR(10),c959 VARCHAR(10),c960 VARCHAR(10),c961 VARCHAR(10),c962 VARCHAR(10),c963 VARCHAR(10),c964 VARCHAR(10),c965 VARCHAR(10),c966 VARCHAR(10),c967 VARCHAR(10),c968 VARCHAR(10),c969 VARCHAR(10),c970 VARCHAR(10),c971 VARCHAR(10),c972 VARCHAR(10),c973 VARCHAR(10),c974 VARCHAR(10),c975 VARCHAR(10),c976 VARCHAR(10),c977 VARCHAR(10),c978 VARCHAR(10),c979 VARCHAR(10),c980 VARCHAR(10),c981 VARCHAR(10),c982 VARCHAR(10),c983 VARCHAR(10),c984 VARCHAR(10),c985 VARCHAR(10),c986 VARCHAR(10),c987 VARCHAR(10),c988 VARCHAR(10),c989 VARCHAR(10),c990 VARCHAR(10),c991 VARCHAR(10),c992 VARCHAR(10),c993 VARCHAR(10),c994 VARCHAR(10),c995 VARCHAR(10),c996 VARCHAR(10),c997 VARCHAR(10),c998 VARCHAR(10),c999 VARCHAR(10),c1000 VARCHAR(10) ) ;
+Warnings:
+Warning 139 Row size too large (> 8126). Changing some columns to TEXT or BLOB may help. In current row format, BLOB prefix of 0 bytes is stored inline.
INSERT INTO t1(c1) VALUES('abc');
SELECT * FROM t1;
c1 c2 c3 c4 c5 c6 c7 c8 c9 c10 c11 c12 c13 c14 c15 c16 c17 c18 c19 c20 c21 c22 c23 c24 c25 c26 c27 c28 c29 c30 c31 c32 c33 c34 c35 c36 c37 c38 c39 c40 c41 c42 c43 c44 c45 c46 c47 c48 c49 c50 c51 c52 c53 c54 c55 c56 c57 c58 c59 c60 c61 c62 c63 c64 c65 c66 c67 c68 c69 c70 c71 c72 c73 c74 c75 c76 c77 c78 c79 c80 c81 c82 c83 c84 c85 c86 c87 c88 c89 c90 c91 c92 c93 c94 c95 c96 c97 c98 c99 c100 c101 c102 c103 c104 c105 c106 c107 c108 c109 c110 c111 c112 c113 c114 c115 c116 c117 c118 c119 c120 c121 c122 c123 c124 c125 c126 c127 c128 c129 c130 c131 c132 c133 c134 c135 c136 c137 c138 c139 c140 c141 c142 c143 c144 c145 c146 c147 c148 c149 c150 c151 c152 c153 c154 c155 c156 c157 c158 c159 c160 c161 c162 c163 c164 c165 c166 c167 c168 c169 c170 c171 c172 c173 c174 c175 c176 c177 c178 c179 c180 c181 c182 c183 c184 c185 c186 c187 c188 c189 c190 c191 c192 c193 c194 c195 c196 c197 c198 c199 c200 c201 c202 c203 c204 c205 c206 c207 c208 c209 c210 c211 c212 c213 c214 c215 c216 c217 c218 c219 c220 c221 c222 c223 c224 c225 c226 c227 c228 c229 c230 c231 c232 c233 c234 c235 c236 c237 c238 c239 c240 c241 c242 c243 c244 c245 c246 c247 c248 c249 c250 c251 c252 c253 c254 c255 c256 c257 c258 c259 c260 c261 c262 c263 c264 c265 c266 c267 c268 c269 c270 c271 c272 c273 c274 c275 c276 c277 c278 c279 c280 c281 c282 c283 c284 c285 c286 c287 c288 c289 c290 c291 c292 c293 c294 c295 c296 c297 c298 c299 c300 c301 c302 c303 c304 c305 c306 c307 c308 c309 c310 c311 c312 c313 c314 c315 c316 c317 c318 c319 c320 c321 c322 c323 c324 c325 c326 c327 c328 c329 c330 c331 c332 c333 c334 c335 c336 c337 c338 c339 c340 c341 c342 c343 c344 c345 c346 c347 c348 c349 c350 c351 c352 c353 c354 c355 c356 c357 c358 c359 c360 c361 c362 c363 c364 c365 c366 c367 c368 c369 c370 c371 c372 c373 c374 c375 c376 c377 c378 c379 c380 c381 c382 c383 c384 c385 c386 c387 c388 c389 c390 c391 c392 c393 c394 c395 c396 c397 c398 c399 c400 c401 c402 c403 c404 c405 c406 c407 c408 c409 c410 c411 c412 c413 c414 c415 c416 c417 c418 c419 c420 c421 c422 c423 c424 c425 c426 c427 c428 c429 c430 c431 c432 c433 c434 c435 c436 c437 c438 c439 c440 c441 c442 c443 c444 c445 c446 c447 c448 c449 c450 c451 c452 c453 c454 c455 c456 c457 c458 c459 c460 c461 c462 c463 c464 c465 c466 c467 c468 c469 c470 c471 c472 c473 c474 c475 c476 c477 c478 c479 c480 c481 c482 c483 c484 c485 c486 c487 c488 c489 c490 c491 c492 c493 c494 c495 c496 c497 c498 c499 c500 c501 c502 c503 c504 c505 c506 c507 c508 c509 c510 c511 c512 c513 c514 c515 c516 c517 c518 c519 c520 c521 c522 c523 c524 c525 c526 c527 c528 c529 c530 c531 c532 c533 c534 c535 c536 c537 c538 c539 c540 c541 c542 c543 c544 c545 c546 c547 c548 c549 c550 c551 c552 c553 c554 c555 c556 c557 c558 c559 c560 c561 c562 c563 c564 c565 c566 c567 c568 c569 c570 c571 c572 c573 c574 c575 c576 c577 c578 c579 c580 c581 c582 c583 c584 c585 c586 c587 c588 c589 c590 c591 c592 c593 c594 c595 c596 c597 c598 c599 c600 c601 c602 c603 c604 c605 c606 c607 c608 c609 c610 c611 c612 c613 c614 c615 c616 c617 c618 c619 c620 c621 c622 c623 c624 c625 c626 c627 c628 c629 c630 c631 c632 c633 c634 c635 c636 c637 c638 c639 c640 c641 c642 c643 c644 c645 c646 c647 c648 c649 c650 c651 c652 c653 c654 c655 c656 c657 c658 c659 c660 c661 c662 c663 c664 c665 c666 c667 c668 c669 c670 c671 c672 c673 c674 c675 c676 c677 c678 c679 c680 c681 c682 c683 c684 c685 c686 c687 c688 c689 c690 c691 c692 c693 c694 c695 c696 c697 c698 c699 c700 c701 c702 c703 c704 c705 c706 c707 c708 c709 c710 c711 c712 c713 c714 c715 c716 c717 c718 c719 c720 c721 c722 c723 c724 c725 c726 c727 c728 c729 c730 c731 c732 
c733 c734 c735 c736 c737 c738 c739 c740 c741 c742 c743 c744 c745 c746 c747 c748 c749 c750 c751 c752 c753 c754 c755 c756 c757 c758 c759 c760 c761 c762 c763 c764 c765 c766 c767 c768 c769 c770 c771 c772 c773 c774 c775 c776 c777 c778 c779 c780 c781 c782 c783 c784 c785 c786 c787 c788 c789 c790 c791 c792 c793 c794 c795 c796 c797 c798 c799 c800 c801 c802 c803 c804 c805 c806 c807 c808 c809 c810 c811 c812 c813 c814 c815 c816 c817 c818 c819 c820 c821 c822 c823 c824 c825 c826 c827 c828 c829 c830 c831 c832 c833 c834 c835 c836 c837 c838 c839 c840 c841 c842 c843 c844 c845 c846 c847 c848 c849 c850 c851 c852 c853 c854 c855 c856 c857 c858 c859 c860 c861 c862 c863 c864 c865 c866 c867 c868 c869 c870 c871 c872 c873 c874 c875 c876 c877 c878 c879 c880 c881 c882 c883 c884 c885 c886 c887 c888 c889 c890 c891 c892 c893 c894 c895 c896 c897 c898 c899 c900 c901 c902 c903 c904 c905 c906 c907 c908 c909 c910 c911 c912 c913 c914 c915 c916 c917 c918 c919 c920 c921 c922 c923 c924 c925 c926 c927 c928 c929 c930 c931 c932 c933 c934 c935 c936 c937 c938 c939 c940 c941 c942 c943 c944 c945 c946 c947 c948 c949 c950 c951 c952 c953 c954 c955 c956 c957 c958 c959 c960 c961 c962 c963 c964 c965 c966 c967 c968 c969 c970 c971 c972 c973 c974 c975 c976 c977 c978 c979 c980 c981 c982 c983 c984 c985 c986 c987 c988 c989 c990 c991 c992 c993 c994 c995 c996 c997 c998 c999 c1000
diff --git a/mysql-test/suite/engines/funcs/r/ix_unique_lob.result b/mysql-test/suite/engines/funcs/r/ix_unique_lob.result
index 4554a912906..1b03012a96f 100644
--- a/mysql-test/suite/engines/funcs/r/ix_unique_lob.result
+++ b/mysql-test/suite/engines/funcs/r/ix_unique_lob.result
@@ -7,7 +7,7 @@ t4
SHOW CREATE TABLE t4;
Table Create Table
t4 CREATE TABLE `t4` (
- `c1` tinyblob,
+ `c1` tinyblob DEFAULT NULL,
UNIQUE KEY `i1` (`c1`(100))
) ENGINE=ENGINE DEFAULT CHARSET=latin1
DROP TABLE t4;
@@ -21,7 +21,7 @@ t4
SHOW CREATE TABLE t4;
Table Create Table
t4 CREATE TABLE `t4` (
- `c1` blob,
+ `c1` blob DEFAULT NULL,
UNIQUE KEY `i1` (`c1`(100))
) ENGINE=ENGINE DEFAULT CHARSET=latin1
DROP TABLE t4;
@@ -35,7 +35,7 @@ t4
SHOW CREATE TABLE t4;
Table Create Table
t4 CREATE TABLE `t4` (
- `c1` mediumblob,
+ `c1` mediumblob DEFAULT NULL,
UNIQUE KEY `i1` (`c1`(100))
) ENGINE=ENGINE DEFAULT CHARSET=latin1
DROP TABLE t4;
@@ -49,7 +49,7 @@ t4
SHOW CREATE TABLE t4;
Table Create Table
t4 CREATE TABLE `t4` (
- `c1` longblob,
+ `c1` longblob DEFAULT NULL,
UNIQUE KEY `i1` (`c1`(100))
) ENGINE=ENGINE DEFAULT CHARSET=latin1
DROP TABLE t4;
@@ -63,7 +63,7 @@ t4
SHOW CREATE TABLE t4;
Table Create Table
t4 CREATE TABLE `t4` (
- `c1` tinytext,
+ `c1` tinytext DEFAULT NULL,
UNIQUE KEY `i1` (`c1`(100))
) ENGINE=ENGINE DEFAULT CHARSET=latin1
DROP TABLE t4;
@@ -77,7 +77,7 @@ t4
SHOW CREATE TABLE t4;
Table Create Table
t4 CREATE TABLE `t4` (
- `c1` text,
+ `c1` text DEFAULT NULL,
UNIQUE KEY `i1` (`c1`(100))
) ENGINE=ENGINE DEFAULT CHARSET=latin1
DROP TABLE t4;
@@ -91,7 +91,7 @@ t4
SHOW CREATE TABLE t4;
Table Create Table
t4 CREATE TABLE `t4` (
- `c1` mediumtext,
+ `c1` mediumtext DEFAULT NULL,
UNIQUE KEY `i1` (`c1`(100))
) ENGINE=ENGINE DEFAULT CHARSET=latin1
DROP TABLE t4;
@@ -105,7 +105,7 @@ t4
SHOW CREATE TABLE t4;
Table Create Table
t4 CREATE TABLE `t4` (
- `c1` longtext,
+ `c1` longtext DEFAULT NULL,
UNIQUE KEY `i1` (`c1`(100))
) ENGINE=ENGINE DEFAULT CHARSET=latin1
DROP TABLE t4;
diff --git a/mysql-test/suite/engines/funcs/r/rpl000011.result b/mysql-test/suite/engines/funcs/r/rpl000011.result
deleted file mode 100644
index dd0fa2fbe74..00000000000
--- a/mysql-test/suite/engines/funcs/r/rpl000011.result
+++ /dev/null
@@ -1,16 +0,0 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
-create table t1 (n int);
-insert into t1 values(1);
-stop slave;
-start slave;
-insert into t1 values(2);
-select * from t1;
-n
-1
-2
-drop table t1;
diff --git a/mysql-test/suite/engines/funcs/r/rpl000017.result b/mysql-test/suite/engines/funcs/r/rpl000017.result
deleted file mode 100644
index 1c611357e64..00000000000
--- a/mysql-test/suite/engines/funcs/r/rpl000017.result
+++ /dev/null
@@ -1,18 +0,0 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
-stop slave;
-grant replication slave on *.* to replicate@localhost identified by 'aaaaaaaaaaaaaaab';
-grant replication slave on *.* to replicate@127.0.0.1 identified by 'aaaaaaaaaaaaaaab';
-start slave;
-drop table if exists t1;
-create table t1(n int);
-insert into t1 values(24);
-select * from t1;
-n
-24
-drop table t1;
-delete from mysql.user where user="replicate";
diff --git a/mysql-test/suite/engines/funcs/r/rpl000010.result b/mysql-test/suite/engines/funcs/r/rpl_000010.result
index 65191ea411f..ae989f25e1b 100644
--- a/mysql-test/suite/engines/funcs/r/rpl000010.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_000010.result
@@ -1,14 +1,14 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
+include/master-slave.inc
+[connection master]
create table t1 (n int not null auto_increment primary key);
insert into t1 values(NULL);
insert into t1 values(2);
+connection slave;
select n from t1;
n
1
2
+connection master;
drop table t1;
+connection slave;
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/r/rpl_000011.result b/mysql-test/suite/engines/funcs/r/rpl_000011.result
new file mode 100644
index 00000000000..83bc0c207ce
--- /dev/null
+++ b/mysql-test/suite/engines/funcs/r/rpl_000011.result
@@ -0,0 +1,18 @@
+include/master-slave.inc
+[connection master]
+create table t1 (n int);
+insert into t1 values(1);
+connection slave;
+include/stop_slave.inc
+include/start_slave.inc
+connection master;
+insert into t1 values(2);
+connection slave;
+select * from t1;
+n
+1
+2
+connection master;
+drop table t1;
+connection slave;
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/r/rpl000013.result b/mysql-test/suite/engines/funcs/r/rpl_000013.result
index 37838bb88e0..87103d7821f 100644
--- a/mysql-test/suite/engines/funcs/r/rpl000013.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_000013.result
@@ -1,17 +1,19 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
+include/master-slave.inc
+[connection master]
create table t2(n int);
create temporary table t1 (n int);
insert into t1 values(1),(2),(3);
insert into t2 select * from t1;
+connection master1;
create temporary table t1 (n int);
insert into t1 values (4),(5);
insert into t2 select * from t1 as t10;
+disconnect master;
+connection slave;
+connection master1;
insert into t2 values(6);
+disconnect master1;
+connection slave;
select * from t2;
n
1
@@ -23,6 +25,9 @@ n
show status like 'Slave_open_temp_tables';
Variable_name Value
Slave_open_temp_tables 0
+connect master2,localhost,root,,;
+connection master2;
drop table if exists t1,t2;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'test.t1'
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/r/rpl_000015.result b/mysql-test/suite/engines/funcs/r/rpl_000015.result
index bb3620ebc84..56fc002ac53 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_000015.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_000015.result
@@ -1,192 +1,46 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
+include/master-slave.inc
+[connection master]
reset master;
show master status;
File Position Binlog_Do_DB Binlog_Ignore_DB
-master-bin.000001 107
-stop slave;
+master-bin.000001 329
+connection slave;
+include/stop_slave.inc
reset slave;
-show slave status;
-Slave_IO_State #
-Master_Host 127.0.0.1
-Master_User root
-Master_Port MASTER_PORT
-Connect_Retry #
-Master_Log_File
-Read_Master_Log_Pos 4
-Relay_Log_File #
-Relay_Log_Pos #
-Relay_Master_Log_File
-Slave_IO_Running No
-Slave_SQL_Running No
-Replicate_Do_DB
-Replicate_Ignore_DB
-Replicate_Do_Table
-Replicate_Ignore_Table
-Replicate_Wild_Do_Table
-Replicate_Wild_Ignore_Table
-Last_Errno 0
-Last_Error
-Skip_Counter 0
-Exec_Master_Log_Pos 0
-Relay_Log_Space #
-Until_Condition None
-Until_Log_File
-Until_Log_Pos 0
-Master_SSL_Allowed No
-Master_SSL_CA_File
-Master_SSL_CA_Path
-Master_SSL_Cert
-Master_SSL_Cipher
-Master_SSL_Key
-Seconds_Behind_Master #
-Master_SSL_Verify_Server_Cert No
-Last_IO_Errno 0
-Last_IO_Error
-Last_SQL_Errno 0
-Last_SQL_Error
-Replicate_Ignore_Server_Ids
-Master_Server_Id 1
+Slave_IO_Running = 'No'
+Slave_SQL_Running = 'No'
+Last_SQL_Errno = '0'
+Last_SQL_Error = ''
+Exec_Master_Log_Pos = '0'
change master to master_host='127.0.0.1';
-show slave status;
-Slave_IO_State #
-Master_Host 127.0.0.1
-Master_User root
-Master_Port MASTER_PORT
-Connect_Retry #
-Master_Log_File
-Read_Master_Log_Pos 4
-Relay_Log_File #
-Relay_Log_Pos #
-Relay_Master_Log_File
-Slave_IO_Running No
-Slave_SQL_Running No
-Replicate_Do_DB
-Replicate_Ignore_DB
-Replicate_Do_Table
-Replicate_Ignore_Table
-Replicate_Wild_Do_Table
-Replicate_Wild_Ignore_Table
-Last_Errno 0
-Last_Error
-Skip_Counter 0
-Exec_Master_Log_Pos 0
-Relay_Log_Space #
-Until_Condition None
-Until_Log_File
-Until_Log_Pos 0
-Master_SSL_Allowed No
-Master_SSL_CA_File
-Master_SSL_CA_Path
-Master_SSL_Cert
-Master_SSL_Cipher
-Master_SSL_Key
-Seconds_Behind_Master #
-Master_SSL_Verify_Server_Cert No
-Last_IO_Errno 0
-Last_IO_Error
-Last_SQL_Errno 0
-Last_SQL_Error
-Replicate_Ignore_Server_Ids
-Master_Server_Id 1
-change master to master_host='127.0.0.1',master_user='root',
+Slave_IO_Running = 'No'
+Slave_SQL_Running = 'No'
+Last_SQL_Errno = '0'
+Last_SQL_Error = ''
+Exec_Master_Log_Pos = '0'
+change master to master_host='127.0.0.1',master_user='root',
master_password='',master_port=MASTER_PORT;
-show slave status;
-Slave_IO_State #
-Master_Host 127.0.0.1
-Master_User root
-Master_Port MASTER_PORT
-Connect_Retry #
-Master_Log_File
-Read_Master_Log_Pos 4
-Relay_Log_File #
-Relay_Log_Pos #
-Relay_Master_Log_File
-Slave_IO_Running No
-Slave_SQL_Running No
-Replicate_Do_DB
-Replicate_Ignore_DB
-Replicate_Do_Table
-Replicate_Ignore_Table
-Replicate_Wild_Do_Table
-Replicate_Wild_Ignore_Table
-Last_Errno 0
-Last_Error
-Skip_Counter 0
-Exec_Master_Log_Pos 0
-Relay_Log_Space #
-Until_Condition None
-Until_Log_File
-Until_Log_Pos 0
-Master_SSL_Allowed No
-Master_SSL_CA_File
-Master_SSL_CA_Path
-Master_SSL_Cert
-Master_SSL_Cipher
-Master_SSL_Key
-Seconds_Behind_Master #
-Master_SSL_Verify_Server_Cert No
-Last_IO_Errno 0
-Last_IO_Error
-Last_SQL_Errno 0
-Last_SQL_Error
-Replicate_Ignore_Server_Ids
-Master_Server_Id 1
-start slave;
-show slave status;
-Slave_IO_State Waiting for master to send event
-Master_Host 127.0.0.1
-Master_User root
-Master_Port MASTER_PORT
-Connect_Retry 1
-Master_Log_File master-bin.000001
-Read_Master_Log_Pos 107
-Relay_Log_File slave-relay-bin.000002
-Relay_Log_Pos 254
-Relay_Master_Log_File master-bin.000001
-Slave_IO_Running Yes
-Slave_SQL_Running Yes
-Replicate_Do_DB
-Replicate_Ignore_DB
-Replicate_Do_Table
-Replicate_Ignore_Table
-Replicate_Wild_Do_Table
-Replicate_Wild_Ignore_Table
-Last_Errno 0
-Last_Error
-Skip_Counter 0
-Exec_Master_Log_Pos 107
-Relay_Log_Space 410
-Until_Condition None
-Until_Log_File
-Until_Log_Pos 0
-Master_SSL_Allowed No
-Master_SSL_CA_File
-Master_SSL_CA_Path
-Master_SSL_Cert
-Master_SSL_Cipher
-Master_SSL_Key
-Seconds_Behind_Master #
-Master_SSL_Verify_Server_Cert No
-Last_IO_Errno 0
-Last_IO_Error
-Last_SQL_Errno 0
-Last_SQL_Error
-Replicate_Ignore_Server_Ids
-Master_Server_Id 1
-drop table if exists t1;
+include/start_slave.inc
+Slave_IO_Running = 'Yes'
+Slave_SQL_Running = 'Yes'
+Last_SQL_Errno = '0'
+Last_SQL_Error = ''
+Exec_Master_Log_Pos = '329'
+connection master;
create table t1 (n int, PRIMARY KEY(n));
insert into t1 values (10),(45),(90);
+connection slave;
SELECT * FROM t1 ORDER BY n;
-n 10
-n 45
-n 90
+n
+10
+45
+90
+connection master;
SELECT * FROM t1 ORDER BY n;
-n 10
-n 45
-n 90
+n
+10
+45
+90
drop table t1;
+connection slave;
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/t/rpl000017.test b/mysql-test/suite/engines/funcs/r/rpl_000017.result
index 2ba321cd8c3..d1c453208c7 100644
--- a/mysql-test/suite/engines/funcs/t/rpl000017.test
+++ b/mysql-test/suite/engines/funcs/r/rpl_000017.result
@@ -1,22 +1,19 @@
-source include/master-slave.inc;
+include/master-slave.inc
+[connection master]
connection slave;
-stop slave;
connection master;
grant replication slave on *.* to replicate@localhost identified by 'aaaaaaaaaaaaaaab';
grant replication slave on *.* to replicate@127.0.0.1 identified by 'aaaaaaaaaaaaaaab';
connection slave;
-start slave;
+include/start_slave.inc
connection master;
---disable_warnings
-drop table if exists t1;
---enable_warnings
create table t1(n int);
insert into t1 values(24);
-sync_slave_with_master;
+connection slave;
select * from t1;
+n
+24
connection master;
drop table t1;
delete from mysql.user where user="replicate";
-sync_slave_with_master;
-
-# End of 4.1 tests
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/r/rpl_LD_INFILE.result b/mysql-test/suite/engines/funcs/r/rpl_LD_INFILE.result
index b092dd9e088..7204122b669 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_LD_INFILE.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_LD_INFILE.result
@@ -1,9 +1,6 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
+include/master-slave.inc
+[connection master]
+connection master;
DROP TABLE IF EXISTS test.t1;
CREATE TABLE test.t1 (a VARCHAR(255), PRIMARY KEY(a));
LOAD DATA INFILE '../../std_data/words2.dat' INTO TABLE test.t1;
@@ -62,6 +59,8 @@ abasements
abasement
abased
abase
+connection slave;
+connection slave;
SELECT * FROM test.t1 ORDER BY a DESC;
a
aberration
@@ -114,4 +113,7 @@ abasements
abasement
abased
abase
+connection master;
DROP TABLE test.t1;
+connection slave;
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/r/rpl_REDIRECT.result b/mysql-test/suite/engines/funcs/r/rpl_REDIRECT.result
deleted file mode 100644
index 7a901b65810..00000000000
--- a/mysql-test/suite/engines/funcs/r/rpl_REDIRECT.result
+++ /dev/null
@@ -1,41 +0,0 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
-SHOW SLAVE STATUS;
-Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master Master_SSL_Verify_Server_Cert Last_IO_Errno Last_IO_Error Last_SQL_Errno Last_SQL_Error
-SHOW SLAVE HOSTS;
-Server_id Host Port Rpl_recovery_rank Master_id
-2 127.0.0.1 SLAVE_PORT 0 1
-create table t1 ( n int);
-insert into t1 values (1),(2),(3),(4);
-insert into t1 values(5);
-SELECT * FROM t1 ORDER BY n;
-n
-1
-2
-3
-4
-5
-SELECT * FROM t1 ORDER BY n;
-n
-1
-2
-3
-4
-SELECT * FROM t1 ORDER BY n;
-n
-1
-2
-3
-4
-SELECT * FROM t1 ORDER BY n;
-n
-1
-2
-3
-4
-5
-drop table t1;
diff --git a/mysql-test/suite/engines/funcs/r/rpl_alter,innodb.rdiff b/mysql-test/suite/engines/funcs/r/rpl_alter,innodb.rdiff
new file mode 100644
index 00000000000..0c70e2c0cfc
--- /dev/null
+++ b/mysql-test/suite/engines/funcs/r/rpl_alter,innodb.rdiff
@@ -0,0 +1,72 @@
+@@ -29,7 +29,7 @@
+ `a` int(10) unsigned NOT NULL,
+ `b` int(11) DEFAULT NULL,
+ PRIMARY KEY (`a`)
+-) ENGINE=MyISAM DEFAULT CHARSET=latin1
++) ENGINE=InnoDB DEFAULT CHARSET=latin1
+ insert into t1 (a) values (1),((1<<32)-1);
+ select * from t1;
+ a b
+@@ -42,7 +42,7 @@
+ `a` bigint(20) NOT NULL,
+ `b` int(11) DEFAULT NULL,
+ PRIMARY KEY (`a`)
+-) ENGINE=MyISAM DEFAULT CHARSET=latin1
++) ENGINE=InnoDB DEFAULT CHARSET=latin1
+ select * from t1;
+ a b
+ 1 NULL
+@@ -54,7 +54,7 @@
+ `a` int(10) unsigned NOT NULL,
+ `b` int(11) DEFAULT NULL,
+ PRIMARY KEY (`a`)
+-) ENGINE=MyISAM DEFAULT CHARSET=latin1
++) ENGINE=InnoDB DEFAULT CHARSET=latin1
+ select * from t1;
+ a b
+ 1 NULL
+@@ -66,7 +66,7 @@
+ `a` bigint(20) unsigned NOT NULL,
+ `b` int(11) DEFAULT NULL,
+ PRIMARY KEY (`a`)
+-) ENGINE=MyISAM DEFAULT CHARSET=latin1
++) ENGINE=InnoDB DEFAULT CHARSET=latin1
+ select * from t1;
+ a b
+ 1 NULL
+@@ -83,7 +83,7 @@
+ `a` bigint(20) unsigned NOT NULL,
+ `b` int(11) DEFAULT NULL,
+ PRIMARY KEY (`a`)
+-) ENGINE=MyISAM DEFAULT CHARSET=latin1
++) ENGINE=InnoDB DEFAULT CHARSET=latin1
+ connection master;
+ create table t2 (a int unsigned auto_increment primary key, b int);
+ show create table t2;
+@@ -92,7 +92,7 @@
+ `a` int(10) unsigned NOT NULL AUTO_INCREMENT,
+ `b` int(11) DEFAULT NULL,
+ PRIMARY KEY (`a`)
+-) ENGINE=MyISAM DEFAULT CHARSET=latin1
++) ENGINE=InnoDB DEFAULT CHARSET=latin1
+ alter table t2 modify a bigint;
+ show create table t2;
+ Table Create Table
+@@ -100,7 +100,7 @@
+ `a` bigint(20) NOT NULL,
+ `b` int(11) DEFAULT NULL,
+ PRIMARY KEY (`a`)
+-) ENGINE=MyISAM DEFAULT CHARSET=latin1
++) ENGINE=InnoDB DEFAULT CHARSET=latin1
+ alter table t2 modify a bigint auto_increment;
+ show create table t2;
+ Table Create Table
+@@ -108,7 +108,7 @@
+ `a` bigint(20) NOT NULL AUTO_INCREMENT,
+ `b` int(11) DEFAULT NULL,
+ PRIMARY KEY (`a`)
+-) ENGINE=MyISAM DEFAULT CHARSET=latin1
++) ENGINE=InnoDB DEFAULT CHARSET=latin1
+ drop table t1,t2;
+ #
+ # MDEV-8432: Slave cannot replicate signed integer-type values
diff --git a/mysql-test/suite/engines/funcs/r/rpl_alter.result b/mysql-test/suite/engines/funcs/r/rpl_alter.result
index 6ef5ce3462a..4f709db9624 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_alter.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_alter.result
@@ -1,10 +1,5 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
-drop database if exists mysqltest;
+include/master-slave.inc
+[connection master]
create database mysqltest;
create table mysqltest.t1 ( n int);
alter table mysqltest.t1 add m int;
@@ -12,10 +7,126 @@ insert into mysqltest.t1 values (1,2);
create table mysqltest.t2 (n int);
insert into mysqltest.t2 values (45);
rename table mysqltest.t2 to mysqltest.t3, mysqltest.t1 to mysqltest.t2;
+connection slave;
select * from mysqltest.t2;
n m
1 2
select * from mysqltest.t3;
n
45
+connection master;
drop database mysqltest;
+connection slave;
+connection master;
+use test;
+#
+# Test bug where ALTER TABLE MODIFY didn't replicate properly
+#
+create table t1 (a int unsigned primary key, b int);
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(10) unsigned NOT NULL,
+ `b` int(11) DEFAULT NULL,
+ PRIMARY KEY (`a`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+insert into t1 (a) values (1),((1<<32)-1);
+select * from t1;
+a b
+1 NULL
+4294967295 NULL
+alter table t1 modify a bigint;
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` bigint(20) NOT NULL,
+ `b` int(11) DEFAULT NULL,
+ PRIMARY KEY (`a`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+select * from t1;
+a b
+1 NULL
+4294967295 NULL
+alter table t1 modify a int unsigned;
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(10) unsigned NOT NULL,
+ `b` int(11) DEFAULT NULL,
+ PRIMARY KEY (`a`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+select * from t1;
+a b
+1 NULL
+4294967295 NULL
+alter table t1 modify a bigint unsigned;
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` bigint(20) unsigned NOT NULL,
+ `b` int(11) DEFAULT NULL,
+ PRIMARY KEY (`a`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+select * from t1;
+a b
+1 NULL
+4294967295 NULL
+connection slave;
+use test;
+select * from t1;
+a b
+1 NULL
+4294967295 NULL
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` bigint(20) unsigned NOT NULL,
+ `b` int(11) DEFAULT NULL,
+ PRIMARY KEY (`a`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+connection master;
+create table t2 (a int unsigned auto_increment primary key, b int);
+show create table t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `a` int(10) unsigned NOT NULL AUTO_INCREMENT,
+ `b` int(11) DEFAULT NULL,
+ PRIMARY KEY (`a`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+alter table t2 modify a bigint;
+show create table t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `a` bigint(20) NOT NULL,
+ `b` int(11) DEFAULT NULL,
+ PRIMARY KEY (`a`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+alter table t2 modify a bigint auto_increment;
+show create table t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `a` bigint(20) NOT NULL AUTO_INCREMENT,
+ `b` int(11) DEFAULT NULL,
+ PRIMARY KEY (`a`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+drop table t1,t2;
+#
+# MDEV-8432: Slave cannot replicate signed integer-type values
+# with high bit set to 1
+# Test replication when we have int on master and bigint on slave
+#
+create table t1 (a int unsigned primary key, b int);
+connection slave;
+SET GLOBAL SLAVE_TYPE_CONVERSIONS='ALL_NON_LOSSY';
+alter table t1 modify a bigint unsigned;
+connection master;
+insert into t1 (a) values (1),((1<<32)-1);
+connection slave;
+select * from t1;
+a b
+1 NULL
+4294967295 NULL
+SET GLOBAL SLAVE_TYPE_CONVERSIONS='';
+connection master;
+drop table t1;
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/r/rpl_alter_db.result b/mysql-test/suite/engines/funcs/r/rpl_alter_db.result
index 80b1a0e983f..56e82712bec 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_alter_db.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_alter_db.result
@@ -1,9 +1,12 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
-use mysql;
+include/master-slave.inc
+[connection master]
+==== Verify that alter database does not stop replication ====
+create database temp_db;
+use temp_db;
alter database collate latin1_bin;
-alter database collate latin1_swedish_ci;
+connection slave;
+==== Clean up ====
+connection master;
+drop database temp_db;
+connection slave;
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/r/rpl_bit.result b/mysql-test/suite/engines/funcs/r/rpl_bit.result
index 924de8a65f8..b83faa4a28a 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_bit.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_bit.result
@@ -1,9 +1,6 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
+include/master-slave.inc
+[connection master]
+connection master;
DROP TABLE IF EXISTS test.t1;
CREATE TABLE test.t1 (
dummyKey INTEGER NOT NULL,
@@ -95,6 +92,7 @@ hex(bit3)
15
24
35
+connection slave;
SELECT oSupp, sSuppD, GSuppDf, VNotSupp, x034 FROM test.t1;
oSupp sSuppD GSuppDf VNotSupp x034
5 5 3 2 1
@@ -135,4 +133,7 @@ hex(bit3)
15
24
35
+connection master;
DROP TABLE IF EXISTS test.t1;
+connection slave;
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/r/rpl_bit_npk.result b/mysql-test/suite/engines/funcs/r/rpl_bit_npk.result
index 9599660f18f..5c7e6fc55b6 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_bit_npk.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_bit_npk.result
@@ -1,9 +1,6 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
+include/master-slave.inc
+[connection master]
+connection master;
DROP TABLE IF EXISTS test.t1;
CREATE TABLE test.t1 (
dummyKey INTEGER NOT NULL,
@@ -98,6 +95,7 @@ hex(bit3)
15
24
35
+connection slave;
SELECT oSupp, sSuppD, GSuppDf, VNotSupp, x034
FROM test.t1
ORDER BY oSupp, sSuppD, GSuppDf, VNotSupp, x034;
@@ -140,6 +138,7 @@ hex(bit3)
15
24
35
+connection master;
CREATE TABLE test.t2 (a INT, b BIT(1));
INSERT INTO test.t2 VALUES (1, b'0');
INSERT INTO test.t2 VALUES (1, b'1');
@@ -156,6 +155,7 @@ SELECT * FROM test.t3 ORDER BY a,b;
a b
1 NULL
2 0
+connection slave;
SELECT a, hex(b) FROM test.t2 ORDER BY a,b;
a hex(b)
1 0
@@ -164,6 +164,9 @@ SELECT * FROM test.t3 ORDER BY a,b;
a b
1 NULL
2 0
+connection master;
DROP TABLE IF EXISTS test.t1;
DROP TABLE IF EXISTS test.t2;
DROP TABLE IF EXISTS test.t3;
+connection slave;
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/r/rpl_change_master.result b/mysql-test/suite/engines/funcs/r/rpl_change_master.result
index c187f16cc83..48cec72d917 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_change_master.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_change_master.result
@@ -1,26 +1,29 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
+include/master-slave.inc
+[connection master]
+call mtr.add_suppression("Slave I/O: The slave I/O thread stops because a fatal error is encountered when it tried to SET @master_binlog_checksum");
+connection master;
create table t1(n int);
+connection slave;
select * from t1;
n
stop slave sql_thread;
+connection master;
insert into t1 values(1);
insert into t1 values(2);
-stop slave;
-show slave status;
-Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master Master_SSL_Verify_Server_Cert Last_IO_Errno Last_IO_Error Last_SQL_Errno Last_SQL_Error Replicate_Ignore_Server_Ids Master_Server_Id
-# 127.0.0.1 root MASTER_MYPORT 1 master-bin.000001 # # # master-bin.000001 No No 0 0 192 # None 0 No # No 0 0 1
+connection slave;
+include/wait_for_slave_param.inc [Read_Master_Log_Pos]
+include/stop_slave.inc
change master to master_user='root';
-show slave status;
-Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master Master_SSL_Verify_Server_Cert Last_IO_Errno Last_IO_Error Last_SQL_Errno Last_SQL_Error Replicate_Ignore_Server_Ids Master_Server_Id
-# 127.0.0.1 root MASTER_MYPORT 1 master-bin.000001 # # # master-bin.000001 No No 0 0 192 # None 0 No # No 0 0 1
start slave;
select * from t1;
n
1
2
+connection master;
drop table t1;
+connection slave;
+connection master;
+CHANGE MASTER TO MASTER_USER='root', MASTER_SSL=0, MASTER_SSL_CA='', MASTER_SSL_CERT='',
+MASTER_SSL_KEY='', MASTER_SSL_CRL='', MASTER_SSL_CRLPATH='';
+CHANGE MASTER TO MASTER_USER='root', MASTER_PASSWORD='', MASTER_SSL=0;
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/r/rpl_create_database.result b/mysql-test/suite/engines/funcs/r/rpl_create_database.result
index 15bb939eb57..28c11525d00 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_create_database.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_create_database.result
@@ -1,13 +1,11 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
+include/master-slave.inc
+[connection master]
DROP DATABASE IF EXISTS mysqltest_prometheus;
DROP DATABASE IF EXISTS mysqltest_sisyfos;
DROP DATABASE IF EXISTS mysqltest_bob;
+connection slave;
DROP DATABASE IF EXISTS mysqltest_bob;
+connection master;
CREATE DATABASE mysqltest_prometheus;
CREATE DATABASE mysqltest_sisyfos;
CREATE DATABASE mysqltest_bob;
@@ -20,25 +18,19 @@ INSERT INTO t2 VALUES(2);
ALTER DATABASE mysqltest_sisyfos CHARACTER SET latin1;
USE mysqltest_sisyfos;
ALTER DATABASE mysqltest_bob CHARACTER SET latin1;
-SHOW DATABASES;
-Database
-information_schema
-mtr
+SHOW DATABASES LIKE 'mysql%';
+Database (mysql%)
mysql
mysqltest_bob
mysqltest_prometheus
mysqltest_sisyfos
-performance_schema
-test
-SHOW DATABASES;
-Database
-information_schema
-mtr
+connection slave;
+SHOW DATABASES LIKE 'mysql%';
+Database (mysql%)
mysql
mysqltest_prometheus
mysqltest_sisyfos
-performance_schema
-test
+connection master;
DROP DATABASE IF EXISTS mysqltest_sisyfos;
USE mysqltest_prometheus;
CREATE TABLE t1 (a INT);
@@ -46,25 +38,18 @@ INSERT INTO t1 VALUES (1);
CREATE DATABASE mysqltest_sisyfos;
USE mysqltest_sisyfos;
CREATE TABLE t2 (a INT);
-SHOW DATABASES;
-Database
-information_schema
-mtr
+SHOW DATABASES LIKE 'mysql%';
+Database (mysql%)
mysql
mysqltest_bob
mysqltest_prometheus
mysqltest_sisyfos
-performance_schema
-test
-SHOW DATABASES;
-Database
-information_schema
-mtr
+connection slave;
+SHOW DATABASES LIKE 'mysql%';
+Database (mysql%)
mysql
mysqltest_prometheus
mysqltest_sisyfos
-performance_schema
-test
USE mysqltest_prometheus;
SHOW TABLES;
Tables_in_mysqltest_prometheus
@@ -73,6 +58,9 @@ USE mysqltest_sisyfos;
SHOW TABLES;
Tables_in_mysqltest_sisyfos
t2
+connection master;
DROP DATABASE IF EXISTS mysqltest_prometheus;
DROP DATABASE IF EXISTS mysqltest_sisyfos;
DROP DATABASE IF EXISTS mysqltest_bob;
+connection slave;
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/r/rpl_do_grant.result b/mysql-test/suite/engines/funcs/r/rpl_do_grant.result
index 50d181be0ca..e4b08800287 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_do_grant.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_do_grant.result
@@ -1,80 +1,320 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
-delete from mysql.user where user=_binary'rpl_do_grant';
-delete from mysql.db where user=_binary'rpl_do_grant';
-flush privileges;
-delete from mysql.user where user=_binary'rpl_ignore_grant';
-delete from mysql.db where user=_binary'rpl_ignore_grant';
-flush privileges;
+include/master-slave.inc
+[connection master]
+connection master;
+create user rpl_do_grant@localhost;
grant select on *.* to rpl_do_grant@localhost;
grant drop on test.* to rpl_do_grant@localhost;
+connection slave;
show grants for rpl_do_grant@localhost;
Grants for rpl_do_grant@localhost
-GRANT SELECT ON *.* TO 'rpl_do_grant'@'localhost'
-GRANT DROP ON `test`.* TO 'rpl_do_grant'@'localhost'
+GRANT SELECT ON *.* TO `rpl_do_grant`@`localhost`
+GRANT DROP ON `test`.* TO `rpl_do_grant`@`localhost`
+connection master;
set password for rpl_do_grant@localhost=password("does it work?");
-select password<>_binary'' from mysql.user where user=_binary'rpl_do_grant';
-password<>_binary''
+connection slave;
+select authentication_string<>'' from mysql.user where user='rpl_do_grant';
+authentication_string<>''
1
-delete from mysql.user where user=_binary'rpl_do_grant';
-delete from mysql.db where user=_binary'rpl_do_grant';
-flush privileges;
-delete from mysql.user where user=_binary'rpl_do_grant';
-delete from mysql.db where user=_binary'rpl_do_grant';
+connection master;
+update mysql.global_priv set priv=json_remove(priv, '$.authentication_string') where user='rpl_do_grant';
flush privileges;
+select authentication_string<>'' from mysql.user where user='rpl_do_grant';
+authentication_string<>''
+0
+set sql_mode='ANSI_QUOTES';
+set password for rpl_do_grant@localhost=password('does it work?');
+set sql_mode='';
+connection slave;
+select authentication_string<>'' from mysql.user where user='rpl_do_grant';
+authentication_string<>''
+1
+connection master;
+drop user rpl_do_grant@localhost;
+connection slave;
+connection master;
show grants for rpl_do_grant@localhost;
ERROR 42000: There is no such grant defined for user 'rpl_do_grant' on host 'localhost'
+connection slave;
show grants for rpl_do_grant@localhost;
ERROR 42000: There is no such grant defined for user 'rpl_do_grant' on host 'localhost'
+connection master;
create user rpl_do_grant@localhost;
show grants for rpl_do_grant@localhost;
Grants for rpl_do_grant@localhost
-GRANT USAGE ON *.* TO 'rpl_do_grant'@'localhost'
+GRANT USAGE ON *.* TO `rpl_do_grant`@`localhost`
show grants for rpl_do_grant2@localhost;
ERROR 42000: There is no such grant defined for user 'rpl_do_grant2' on host 'localhost'
+connection slave;
show grants for rpl_do_grant@localhost;
Grants for rpl_do_grant@localhost
-GRANT USAGE ON *.* TO 'rpl_do_grant'@'localhost'
+GRANT USAGE ON *.* TO `rpl_do_grant`@`localhost`
show grants for rpl_do_grant2@localhost;
ERROR 42000: There is no such grant defined for user 'rpl_do_grant2' on host 'localhost'
+connection master;
rename user rpl_do_grant@localhost to rpl_do_grant2@localhost;
show grants for rpl_do_grant2@localhost;
Grants for rpl_do_grant2@localhost
-GRANT USAGE ON *.* TO 'rpl_do_grant2'@'localhost'
+GRANT USAGE ON *.* TO `rpl_do_grant2`@`localhost`
+connection slave;
show grants for rpl_do_grant2@localhost;
Grants for rpl_do_grant2@localhost
-GRANT USAGE ON *.* TO 'rpl_do_grant2'@'localhost'
+GRANT USAGE ON *.* TO `rpl_do_grant2`@`localhost`
+connection master;
grant DELETE,INSERT on mysqltest1.* to rpl_do_grant2@localhost;
show grants for rpl_do_grant2@localhost;
Grants for rpl_do_grant2@localhost
-GRANT USAGE ON *.* TO 'rpl_do_grant2'@'localhost'
-GRANT INSERT, DELETE ON `mysqltest1`.* TO 'rpl_do_grant2'@'localhost'
+GRANT USAGE ON *.* TO `rpl_do_grant2`@`localhost`
+GRANT INSERT, DELETE ON `mysqltest1`.* TO `rpl_do_grant2`@`localhost`
+connection slave;
show grants for rpl_do_grant2@localhost;
Grants for rpl_do_grant2@localhost
-GRANT USAGE ON *.* TO 'rpl_do_grant2'@'localhost'
-GRANT INSERT, DELETE ON `mysqltest1`.* TO 'rpl_do_grant2'@'localhost'
+GRANT USAGE ON *.* TO `rpl_do_grant2`@`localhost`
+GRANT INSERT, DELETE ON `mysqltest1`.* TO `rpl_do_grant2`@`localhost`
+connection master;
revoke DELETE on mysqltest1.* from rpl_do_grant2@localhost;
show grants for rpl_do_grant2@localhost;
Grants for rpl_do_grant2@localhost
-GRANT USAGE ON *.* TO 'rpl_do_grant2'@'localhost'
-GRANT INSERT ON `mysqltest1`.* TO 'rpl_do_grant2'@'localhost'
+GRANT USAGE ON *.* TO `rpl_do_grant2`@`localhost`
+GRANT INSERT ON `mysqltest1`.* TO `rpl_do_grant2`@`localhost`
+connection slave;
show grants for rpl_do_grant2@localhost;
Grants for rpl_do_grant2@localhost
-GRANT USAGE ON *.* TO 'rpl_do_grant2'@'localhost'
-GRANT INSERT ON `mysqltest1`.* TO 'rpl_do_grant2'@'localhost'
+GRANT USAGE ON *.* TO `rpl_do_grant2`@`localhost`
+GRANT INSERT ON `mysqltest1`.* TO `rpl_do_grant2`@`localhost`
+connection master;
revoke all privileges, grant option from rpl_do_grant2@localhost;
show grants for rpl_do_grant2@localhost;
Grants for rpl_do_grant2@localhost
-GRANT USAGE ON *.* TO 'rpl_do_grant2'@'localhost'
+GRANT USAGE ON *.* TO `rpl_do_grant2`@`localhost`
+connection slave;
show grants for rpl_do_grant2@localhost;
Grants for rpl_do_grant2@localhost
-GRANT USAGE ON *.* TO 'rpl_do_grant2'@'localhost'
+GRANT USAGE ON *.* TO `rpl_do_grant2`@`localhost`
+connection master;
drop user rpl_do_grant2@localhost;
show grants for rpl_do_grant2@localhost;
ERROR 42000: There is no such grant defined for user 'rpl_do_grant2' on host 'localhost'
+connection slave;
show grants for rpl_do_grant2@localhost;
ERROR 42000: There is no such grant defined for user 'rpl_do_grant2' on host 'localhost'
+connection master;
+call mtr.add_suppression("Slave: Operation DROP USER failed for 'create_rout_db'@'localhost' error.* 1396");
+connection slave;
+connection master;
+DROP DATABASE IF EXISTS bug42217_db;
+CREATE DATABASE bug42217_db;
+GRANT CREATE ROUTINE ON bug42217_db.* TO 'create_rout_db'@'localhost'
+ IDENTIFIED BY 'create_rout_db' WITH GRANT OPTION;
+connection slave;
+connection master;
+connect create_rout_db_master, localhost, create_rout_db, create_rout_db, bug42217_db,$MASTER_MYPORT,;
+connect create_rout_db_slave, localhost, create_rout_db, create_rout_db, bug42217_db, $SLAVE_MYPORT,;
+connection create_rout_db_master;
+USE bug42217_db;
+CREATE FUNCTION upgrade_del_func() RETURNS CHAR(30)
+BEGIN
+RETURN "INSIDE upgrade_del_func()";
+END//
+connection master;
+USE bug42217_db;
+SELECT * FROM mysql.procs_priv;
+Host Db User Routine_name Routine_type Grantor Proc_priv Timestamp
+localhost bug42217_db create_rout_db upgrade_del_func FUNCTION create_rout_db@localhost Execute,Alter Routine #
+SELECT upgrade_del_func();
+upgrade_del_func()
+INSIDE upgrade_del_func()
+connection slave;
+SELECT * FROM mysql.procs_priv;
+Host Db User Routine_name Routine_type Grantor Proc_priv Timestamp
+localhost bug42217_db create_rout_db upgrade_del_func FUNCTION create_rout_db@localhost Execute,Alter Routine #
+SHOW GRANTS FOR 'create_rout_db'@'localhost';
+Grants for create_rout_db@localhost
+GRANT USAGE ON *.* TO `create_rout_db`@`localhost` IDENTIFIED BY PASSWORD '*08792480350CBA057BDE781B9DF183B263934601'
+GRANT CREATE ROUTINE ON `bug42217_db`.* TO `create_rout_db`@`localhost` WITH GRANT OPTION
+GRANT EXECUTE, ALTER ROUTINE ON FUNCTION `bug42217_db`.`upgrade_del_func` TO `create_rout_db`@`localhost`
+USE bug42217_db;
+SHOW CREATE FUNCTION upgrade_del_func;
+Function sql_mode Create Function character_set_client collation_connection Database Collation
+upgrade_del_func STRICT_TRANS_TABLES,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION CREATE DEFINER=`create_rout_db`@`localhost` FUNCTION `upgrade_del_func`() RETURNS char(30) CHARSET latin1
+BEGIN
+RETURN "INSIDE upgrade_del_func()";
+END latin1 latin1_swedish_ci latin1_swedish_ci
+SELECT upgrade_del_func();
+upgrade_del_func()
+INSIDE upgrade_del_func()
+"Check whether the definer user will be able to execute the replicated routine on slave"
+connection create_rout_db_slave;
+USE bug42217_db;
+SHOW CREATE FUNCTION upgrade_del_func;
+Function sql_mode Create Function character_set_client collation_connection Database Collation
+upgrade_del_func STRICT_TRANS_TABLES,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION CREATE DEFINER=`create_rout_db`@`localhost` FUNCTION `upgrade_del_func`() RETURNS char(30) CHARSET latin1
+BEGIN
+RETURN "INSIDE upgrade_del_func()";
+END latin1 latin1_swedish_ci latin1_swedish_ci
+SELECT upgrade_del_func();
+upgrade_del_func()
+INSIDE upgrade_del_func()
+connection slave;
+DELETE FROM mysql.procs_priv;
+FLUSH PRIVILEGES;
+USE bug42217_db;
+"Can't execute the replicated routine on slave like before after procs privilege is deleted "
+SELECT upgrade_del_func();
+ERROR 42000: execute command denied to user 'create_rout_db'@'localhost' for routine 'bug42217_db.upgrade_del_func'
+"Test the user who creates a function on master doesn't exist on slave."
+"Hence SQL thread ACL_GLOBAL privilege jumps in and no mysql.procs_priv is inserted"
+DROP USER 'create_rout_db'@'localhost';
+connection create_rout_db_master;
+CREATE FUNCTION upgrade_alter_func() RETURNS CHAR(30)
+BEGIN
+RETURN "INSIDE upgrade_alter_func()";
+END//
+connection master;
+SELECT upgrade_alter_func();
+upgrade_alter_func()
+INSIDE upgrade_alter_func()
+connection slave;
+SHOW CREATE FUNCTION upgrade_alter_func;
+Function sql_mode Create Function character_set_client collation_connection Database Collation
+upgrade_alter_func STRICT_TRANS_TABLES,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION CREATE DEFINER=`create_rout_db`@`localhost` FUNCTION `upgrade_alter_func`() RETURNS char(30) CHARSET latin1
+BEGIN
+RETURN "INSIDE upgrade_alter_func()";
+END latin1 latin1_swedish_ci latin1_swedish_ci
+"Should no privilege record for upgrade_alter_func in mysql.procs_priv"
+SELECT * FROM mysql.procs_priv;
+Host Db User Routine_name Routine_type Grantor Proc_priv Timestamp
+SELECT upgrade_alter_func();
+ERROR HY000: The user specified as a definer ('create_rout_db'@'localhost') does not exist
+disconnect create_rout_db_master;
+disconnect create_rout_db_slave;
+connection master;
+USE bug42217_db;
+DROP FUNCTION upgrade_del_func;
+DROP FUNCTION upgrade_alter_func;
+DROP DATABASE bug42217_db;
+connection slave;
+connection master;
+SET SQL_LOG_BIN= 0;
+DROP USER 'create_rout_db'@'localhost';
+SET SQL_LOG_BIN= 1;
+include/rpl_reset.inc
+USE test;
+######## BUG#49119 #######
+### i) test case from the 'how to repeat section'
+connection master;
+CREATE TABLE t1(c1 INT);
+CREATE PROCEDURE p1() SELECT * FROM t1 |
+REVOKE EXECUTE ON PROCEDURE p1 FROM 'root'@'localhost';
+ERROR 42000: There is no such grant defined for user 'root' on host 'localhost' on routine 'p1'
+connection slave;
+connection master;
+DROP TABLE t1;
+DROP PROCEDURE p1;
+connection slave;
+### ii) Test case in which REVOKE partially succeeds
+connection master;
+include/rpl_reset.inc
+connection master;
+CREATE TABLE t1(c1 INT);
+CREATE PROCEDURE p1() SELECT * FROM t1 |
+CREATE USER 'user49119'@'localhost';
+GRANT EXECUTE ON PROCEDURE p1 TO 'user49119'@'localhost';
+##############################################################
+### Showing grants for both users: root and user49119 (master)
+SHOW GRANTS FOR 'user49119'@'localhost';
+Grants for user49119@localhost
+GRANT USAGE ON *.* TO `user49119`@`localhost`
+GRANT EXECUTE ON PROCEDURE `test`.`p1` TO `user49119`@`localhost`
+SHOW GRANTS FOR CURRENT_USER;
+Grants for root@localhost
+GRANT ALL PRIVILEGES ON *.* TO `root`@`localhost` WITH GRANT OPTION
+GRANT PROXY ON ''@'%' TO 'root'@'localhost' WITH GRANT OPTION
+##############################################################
+connection slave;
+##############################################################
+### Showing grants for both users: root and user49119 (master)
+SHOW GRANTS FOR 'user49119'@'localhost';
+Grants for user49119@localhost
+GRANT USAGE ON *.* TO `user49119`@`localhost`
+GRANT EXECUTE ON PROCEDURE `test`.`p1` TO `user49119`@`localhost`
+SHOW GRANTS FOR CURRENT_USER;
+Grants for root@localhost
+GRANT ALL PRIVILEGES ON *.* TO `root`@`localhost` WITH GRANT OPTION
+GRANT PROXY ON ''@'%' TO 'root'@'localhost' WITH GRANT OPTION
+##############################################################
+connection master;
+## This statement will make the revoke fail because root has no
+## execute grant. However, it will still revoke the grant for
+## user49119.
+REVOKE EXECUTE ON PROCEDURE p1 FROM 'user49119'@'localhost', 'root'@'localhost';
+ERROR 42000: There is no such grant defined for user 'root' on host 'localhost' on routine 'p1'
+##############################################################
+### Showing grants for both users: root and user49119 (master)
+### after revoke statement failure
+SHOW GRANTS FOR 'user49119'@'localhost';
+Grants for user49119@localhost
+GRANT USAGE ON *.* TO `user49119`@`localhost`
+SHOW GRANTS FOR CURRENT_USER;
+Grants for root@localhost
+GRANT ALL PRIVILEGES ON *.* TO `root`@`localhost` WITH GRANT OPTION
+GRANT PROXY ON ''@'%' TO 'root'@'localhost' WITH GRANT OPTION
+##############################################################
+connection slave;
+#############################################################
+### Showing grants for both users: root and user49119 (slave)
+### after revoke statement failure (should match
+SHOW GRANTS FOR 'user49119'@'localhost';
+Grants for user49119@localhost
+GRANT USAGE ON *.* TO `user49119`@`localhost`
+SHOW GRANTS FOR CURRENT_USER;
+Grants for root@localhost
+GRANT ALL PRIVILEGES ON *.* TO `root`@`localhost` WITH GRANT OPTION
+GRANT PROXY ON ''@'%' TO 'root'@'localhost' WITH GRANT OPTION
+##############################################################
+connection master;
+DROP TABLE t1;
+DROP PROCEDURE p1;
+DROP USER 'user49119'@'localhost';
+connection slave;
+include/rpl_reset.inc
+connection master;
+grant all on *.* to foo@"1.2.3.4";
+revoke all privileges, grant option from "foo";
+ERROR HY000: Can't revoke all privileges for one or more of the requested users
+include/show_binlog_events.inc
+Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000001 # Gtid # # GTID #-#-#
+master-bin.000001 # Query # # use `test`; grant all on *.* to foo@"1.2.3.4"
+master-bin.000001 # Gtid # # GTID #-#-#
+master-bin.000001 # Query # # use `test`; revoke all privileges, grant option from "foo"
+connection slave;
+include/check_slave_no_error.inc
+connection master;
+DROP USER foo@"1.2.3.4";
+connection slave;
+
+# Bug#27606 GRANT statement should be replicated with DEFINER information
+include/rpl_reset.inc
+connection master;
+GRANT SELECT, INSERT ON mysql.user TO user_bug27606@localhost;
+SELECT Grantor FROM mysql.tables_priv WHERE User='user_bug27606';
+Grantor
+root@localhost
+connection slave;
+SELECT Grantor FROM mysql.tables_priv WHERE User='user_bug27606';
+Grantor
+root@localhost
+connection master;
+REVOKE SELECT ON mysql.user FROM user_bug27606@localhost;
+SELECT Grantor FROM mysql.tables_priv WHERE User='user_bug27606';
+Grantor
+root@localhost
+connection slave;
+SELECT Grantor FROM mysql.tables_priv WHERE User='user_bug27606';
+Grantor
+root@localhost
+connection master;
+DROP USER user_bug27606@localhost;
+select priv into @root_priv from mysql.global_priv where user='root' and host='127.0.0.1';
+update mysql.global_priv set priv=@root_priv where user='root' and host='localhost';
+include/rpl_end.inc
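
The BUG#49119 part of rpl_do_grant.result above records how a multi-user REVOKE that fails for one grantee still removes the grants it could, and is still replicated so the grant tables on master and slave stay in sync. A condensed sketch of that scenario, reusing the objects from the result file (the test script wraps CREATE PROCEDURE in a custom delimiter, which a single-statement body does not need):

    CREATE TABLE t1 (c1 INT);
    CREATE PROCEDURE p1() SELECT * FROM t1;
    CREATE USER 'user49119'@'localhost';
    GRANT EXECUTE ON PROCEDURE p1 TO 'user49119'@'localhost';

    -- root holds no EXECUTE grant on p1, so this fails with "There is no such
    -- grant defined for user 'root' on host 'localhost' on routine 'p1'",
    -- yet the grant held by user49119 is removed and the statement is written
    -- to the binary log, so the slave ends up with the same grant tables.
    REVOKE EXECUTE ON PROCEDURE p1
      FROM 'user49119'@'localhost', 'root'@'localhost';

    SHOW GRANTS FOR 'user49119'@'localhost';  -- only GRANT USAGE is left
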
diff --git a/mysql-test/suite/engines/funcs/r/rpl_drop.result b/mysql-test/suite/engines/funcs/r/rpl_drop.result
index b83594c9bb1..74514fe7f61 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_drop.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_drop.result
@@ -1,10 +1,7 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
-drop table if exists t1, t2;
+include/master-slave.inc
+[connection master]
create table t1 (a int);
drop table t1, t2;
-ERROR 42S02: Unknown table 't2'
+ERROR 42S02: Unknown table 'test.t2'
+connection slave;
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/r/rpl_drop_db.result b/mysql-test/suite/engines/funcs/r/rpl_drop_db.result
index b2d56af34bc..3712527afe4 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_drop_db.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_drop_db.result
@@ -1,29 +1,30 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
+include/master-slave.inc
+[connection master]
+connection master;
drop database if exists mysqltest1;
create database mysqltest1;
create table mysqltest1.t1 (n int);
insert into mysqltest1.t1 values (1);
select * from mysqltest1.t1 into outfile 'mysqltest1/f1.txt';
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
create table mysqltest1.t2 (n int);
create table mysqltest1.t3 (n int);
drop database mysqltest1;
-ERROR HY000: Error dropping database (can't rmdir './mysqltest1/', errno: 17)
+ERROR HY000: Error dropping database (can't rmdir './mysqltest1', errno: 39 "Directory not empty")
use mysqltest1;
show tables;
Tables_in_mysqltest1
drop database mysqltest1;
-ERROR HY000: Error dropping database (can't rmdir './mysqltest1/', errno: 17)
+ERROR HY000: Error dropping database (can't rmdir './mysqltest1', errno: 39 "Directory not empty")
use mysqltest1;
show tables;
Tables_in_mysqltest1
use test;
create table t1 (n int);
insert into t1 values (1234);
+connection slave;
+connection slave;
use mysqltest1;
show tables;
Tables_in_mysqltest1
@@ -31,9 +32,9 @@ use test;
select * from t1;
n
1234
-drop table t1;
-stop slave;
-drop database mysqltest1;
-drop database mysqltest1;
+connection master;
use test;
drop table t1;
+drop database mysqltest1;
+connection slave;
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/r/rpl_dual_pos_advance.result b/mysql-test/suite/engines/funcs/r/rpl_dual_pos_advance.result
index 257baa81b74..ed9060f8307 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_dual_pos_advance.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_dual_pos_advance.result
@@ -1,22 +1,28 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
+include/master-slave.inc
+[connection master]
+connection slave;
reset master;
+connection master;
change master to master_host="127.0.0.1",master_port=SLAVE_PORT,master_user="root";
-start slave;
+include/start_slave.inc
+connection slave;
create table t1 (n int);
+connection master;
+connection master;
create table t4 (n int);
create table t5 (n int);
create table t6 (n int);
+connection slave;
+connection slave;
+connection master;
show tables;
Tables_in_test
t1
t4
t5
t6
-stop slave;
+include/stop_slave.inc
reset slave;
drop table t1,t4,t5,t6;
+connection slave;
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/r/rpl_empty_master_crash.result b/mysql-test/suite/engines/funcs/r/rpl_empty_master_crash.result
deleted file mode 100644
index b5e14d3adac..00000000000
--- a/mysql-test/suite/engines/funcs/r/rpl_empty_master_crash.result
+++ /dev/null
@@ -1,12 +0,0 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
-show slave status;
-Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master Master_SSL_Verify_Server_Cert Last_IO_Errno Last_IO_Error Last_SQL_Errno Last_SQL_Error
-load table t1 from master;
-ERROR 08S01: Error connecting to master: Master is not configured
-load table t1 from master;
-ERROR HY000: Error from master: 'Table 'test.t1' doesn't exist'
diff --git a/mysql-test/suite/engines/funcs/r/rpl_err_ignoredtable.result b/mysql-test/suite/engines/funcs/r/rpl_err_ignoredtable.result
index f211d5d9a2f..2017252f5d1 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_err_ignoredtable.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_err_ignoredtable.result
@@ -1,14 +1,12 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
+include/master-slave.inc
+[connection master]
+connection master;
create table t1 (a int primary key);
create table t4 (a int primary key);
insert into t1 values (1),(1);
Got one of the listed errors
insert into t4 values (1),(2);
+connection slave;
show tables like 't1';
Tables_in_test (t1)
show tables like 't4';
@@ -18,25 +16,36 @@ SELECT * FROM test.t4 ORDER BY a;
a
1
2
+connection master;
drop table t1;
+connection slave;
+connection master1;
select get_lock('crash_lock%20C', 10);
get_lock('crash_lock%20C', 10)
1
+connection master;
create table t2 (a int primary key);
insert into t2 values(1);
create table t3 (id int);
insert into t3 values(connection_id());
update t2 set a = a + 1 + get_lock('crash_lock%20C', 10);
+connection master1;
select (@id := id) - id from t3;
(@id := id) - id
0
kill @id;
drop table t2,t3;
insert into t4 values (3),(4);
+connection master;
+connection master1;
+connection slave;
SELECT * FROM test.t4 ORDER BY a;
a
1
2
3
4
+connection master1;
DROP TABLE test.t4;
+connection slave;
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/r/rpl_flushlog_loop.result b/mysql-test/suite/engines/funcs/r/rpl_flushlog_loop.result
index 53a521aaf85..527dd8dc983 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_flushlog_loop.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_flushlog_loop.result
@@ -1,61 +1,14 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
-stop slave;
+include/master-slave.inc
+[connection master]
+connection slave;
+include/stop_slave.inc
change master to master_host='127.0.0.1',master_user='root',
master_password='',master_port=MASTER_PORT;
-start slave;
-stop slave;
+include/start_slave.inc
+connection master;
change master to master_host='127.0.0.1',master_user='root',
master_password='',master_port=SLAVE_PORT;
-start slave;
-
-let $result_pattern= '%127.0.0.1%root%slave-bin.000001%slave-bin.000001%Yes%Yes%0%0%None%' ;
-
---source include/wait_slave_status.inc
+include/start_slave.inc
flush logs;
-SHOW SLAVE STATUS;
-Slave_IO_State #
-Master_Host 127.0.0.1
-Master_User root
-Master_Port SLAVE_PORT
-Connect_Retry 60
-Master_Log_File slave-bin.000001
-Read_Master_Log_Pos 107
-Relay_Log_File #
-Relay_Log_Pos #
-Relay_Master_Log_File slave-bin.000001
-Slave_IO_Running Yes
-Slave_SQL_Running Yes
-Replicate_Do_DB
-Replicate_Ignore_DB
-Replicate_Do_Table
-Replicate_Ignore_Table #
-Replicate_Wild_Do_Table
-Replicate_Wild_Ignore_Table
-Last_Errno 0
-Last_Error
-Skip_Counter 0
-Exec_Master_Log_Pos 107
-Relay_Log_Space #
-Until_Condition None
-Until_Log_File
-Until_Log_Pos 0
-Master_SSL_Allowed No
-Master_SSL_CA_File
-Master_SSL_CA_Path
-Master_SSL_Cert
-Master_SSL_Cipher
-Master_SSL_Key
-Seconds_Behind_Master #
-Master_SSL_Verify_Server_Cert No
-Last_IO_Errno 0
-Last_IO_Error
-Last_SQL_Errno 0
-Last_SQL_Error
-Replicate_Ignore_Server_Ids
-Master_Server_Id 2
-STOP SLAVE;
+include/stop_slave.inc
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/r/rpl_free_items.result b/mysql-test/suite/engines/funcs/r/rpl_free_items.result
index 91c1e2aa6e5..af7a267266f 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_free_items.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_free_items.result
@@ -1,10 +1,10 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
+include/master-slave.inc
+[connection master]
create table t1 (a int);
create table t2 (a int);
+connection slave;
+connection master;
drop table t1;
drop table t2;
+connection slave;
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/r/rpl_get_lock.result b/mysql-test/suite/engines/funcs/r/rpl_get_lock.result
index f7c9541bd9f..b852546e1bf 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_get_lock.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_get_lock.result
@@ -1,17 +1,17 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
+include/master-slave.inc
+[connection master]
+CALL mtr.add_suppression("Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT");
create table t1(n int);
insert into t1 values(get_lock("lock",2));
+disconnect master;
+connection master1;
select get_lock("lock",2);
get_lock("lock",2)
1
select release_lock("lock");
release_lock("lock")
1
+connection slave;
select get_lock("lock",3);
get_lock("lock",3)
1
@@ -32,4 +32,7 @@ is_free_lock("lock2")
select is_free_lock(NULL);
is_free_lock(NULL)
NULL
+connection master1;
drop table t1;
+connection slave;
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/r/rpl_ignore_grant.result b/mysql-test/suite/engines/funcs/r/rpl_ignore_grant.result
index 5169cc8e888..0a5564ac6c0 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_ignore_grant.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_ignore_grant.result
@@ -1,21 +1,23 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
+include/master-slave.inc
+[connection master]
+connection master;
+set sql_mode="";
delete from mysql.user where user=_binary'rpl_ignore_grant';
delete from mysql.db where user=_binary'rpl_ignore_grant';
flush privileges;
+connection slave;
+set sql_mode="";
delete from mysql.user where user=_binary'rpl_ignore_grant';
delete from mysql.db where user=_binary'rpl_ignore_grant';
flush privileges;
+connection master;
grant select on *.* to rpl_ignore_grant@localhost;
grant drop on test.* to rpl_ignore_grant@localhost;
show grants for rpl_ignore_grant@localhost;
Grants for rpl_ignore_grant@localhost
-GRANT SELECT ON *.* TO 'rpl_ignore_grant'@'localhost'
-GRANT DROP ON `test`.* TO 'rpl_ignore_grant'@'localhost'
+GRANT SELECT ON *.* TO `rpl_ignore_grant`@`localhost`
+GRANT DROP ON `test`.* TO `rpl_ignore_grant`@`localhost`
+connection slave;
show grants for rpl_ignore_grant@localhost;
ERROR 42000: There is no such grant defined for user 'rpl_ignore_grant' on host 'localhost'
select count(*) from mysql.user where user=_binary'rpl_ignore_grant';
@@ -25,13 +27,19 @@ select count(*) from mysql.db where user=_binary'rpl_ignore_grant';
count(*)
0
grant select on *.* to rpl_ignore_grant@localhost;
+connection master;
set password for rpl_ignore_grant@localhost=password("does it work?");
+connection slave;
select password<>_binary'' from mysql.user where user=_binary'rpl_ignore_grant';
password<>_binary''
0
+connection master;
delete from mysql.user where user=_binary'rpl_ignore_grant';
delete from mysql.db where user=_binary'rpl_ignore_grant';
flush privileges;
+connection slave;
delete from mysql.user where user=_binary'rpl_ignore_grant';
delete from mysql.db where user=_binary'rpl_ignore_grant';
flush privileges;
+connection master;
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/r/rpl_ignore_revoke.result b/mysql-test/suite/engines/funcs/r/rpl_ignore_revoke.result
index b1ccd2f0442..cc65d9dacfd 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_ignore_revoke.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_ignore_revoke.result
@@ -1,30 +1,34 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
+include/master-slave.inc
+[connection master]
+connection master;
grant select on *.* to 'user_foo'@'%' identified by 'user_foopass';
revoke select on *.* from 'user_foo'@'%';
select select_priv from mysql.user where user='user_foo' /* master:must be N */;
-select_priv
+Select_priv
N
+connection slave;
grant select on *.* to 'user_foo'@'%' identified by 'user_foopass';
revoke select on *.* from 'user_foo'@'%';
select select_priv from mysql.user where user='user_foo' /* slave:must be N */;
-select_priv
+Select_priv
N
grant select on *.* to 'user_foo'@'%' identified by 'user_foopass';
select select_priv from mysql.user where user='user_foo' /* slave:must be Y */;
-select_priv
+Select_priv
Y
+connection master;
revoke select on *.* from 'user_foo';
select select_priv from mysql.user where user='user_foo' /* master:must be N */;
-select_priv
+Select_priv
N
+connection slave;
select select_priv from mysql.user where user='user_foo' /* slave:must get Y */;
-select_priv
+Select_priv
Y
+connection slave;
revoke select on *.* FROM 'user_foo';
+connection master;
delete from mysql.user where user="user_foo";
+connection slave;
delete from mysql.user where user="user_foo";
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/r/rpl_ignore_table_update.result b/mysql-test/suite/engines/funcs/r/rpl_ignore_table_update.result
index a88a3c690ed..d4efa2dc42e 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_ignore_table_update.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_ignore_table_update.result
@@ -1,14 +1,12 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
+include/master-slave.inc
+[connection master]
+connection slave;
use test;
drop table if exists mysqltest_foo;
drop table if exists mysqltest_bar;
create table mysqltest_foo (n int);
insert into mysqltest_foo values(4);
+connection master;
use test;
create table mysqltest_foo (n int);
insert into mysqltest_foo values(5);
@@ -16,8 +14,12 @@ create table mysqltest_bar (m int);
insert into mysqltest_bar values(15);
create table t1 (k int);
insert into t1 values(55);
+connection slave;
select mysqltest_foo.n,mysqltest_bar.m,t1.k from mysqltest_foo,mysqltest_bar,t1;
n m k
4 15 55
+connection master;
drop table mysqltest_foo,mysqltest_bar,t1;
+connection slave;
drop table mysqltest_foo,mysqltest_bar,t1;
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/r/rpl_init_slave.result b/mysql-test/suite/engines/funcs/r/rpl_init_slave.result
index 740c918976c..a90de76f139 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_init_slave.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_init_slave.result
@@ -1,9 +1,11 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
+include/master-slave.inc
+[connection master]
+set global max_connections=151;
+connection slave;
+include/stop_slave.inc
+include/start_slave.inc
+connection master;
+connection slave;
show variables like 'init_slave';
Variable_name Value
init_slave set global max_connections=500
@@ -11,17 +13,21 @@ show variables like 'max_connections';
Variable_name Value
max_connections 500
reset master;
+connection master;
show variables like 'init_slave';
Variable_name Value
init_slave
show variables like 'max_connections';
Variable_name Value
max_connections 151
+connection slave;
set @my_global_init_connect= @@global.init_connect;
set global init_connect="set @c=1";
show variables like 'init_connect';
Variable_name Value
init_connect set @c=1
-stop slave;
+connection master;
+connection slave;
set global init_connect= @my_global_init_connect;
set global max_connections= default;
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/r/rpl_insert.result b/mysql-test/suite/engines/funcs/r/rpl_insert.result
index b6a97926f73..1e790387e71 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_insert.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_insert.result
@@ -1,23 +1,25 @@
#
# Bug#20821: INSERT DELAYED fails to write some rows to binlog
#
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
+include/master-slave.inc
+[connection master]
CREATE SCHEMA IF NOT EXISTS mysqlslap;
USE mysqlslap;
-CREATE TABLE t1 (id INT, name VARCHAR(64));
+CREATE TABLE t1 (id INT, name VARCHAR(64)) ENGINE=MyISAM;
+connection slave;
+connection master;
SELECT COUNT(*) FROM mysqlslap.t1;
COUNT(*)
5000
+connection slave;
SELECT COUNT(*) FROM mysqlslap.t1;
COUNT(*)
5000
#
# Cleanup
#
+connection master;
USE test;
DROP SCHEMA mysqlslap;
+connection slave;
+include/rpl_end.inc
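
The ENGINE=MyISAM pinned in the updated rpl_insert.result is not incidental: the Bug#20821 test drives the table with INSERT DELAYED, and DELAYED is only honoured by MyISAM-style engines (on engines without that support the clause falls back to a plain INSERT). A minimal hand-written sketch of the idea (the real test generates its 5000 rows through mysqlslap, which is omitted here):

    CREATE SCHEMA IF NOT EXISTS mysqlslap;
    USE mysqlslap;
    CREATE TABLE t1 (id INT, name VARCHAR(64)) ENGINE=MyISAM;
    INSERT DELAYED INTO t1 VALUES (1, 'row one');
    -- FLUSH TABLE waits for the delayed-insert handler to empty its queue,
    -- so the row is visible to the COUNT below.
    FLUSH TABLE t1;
    SELECT COUNT(*) FROM t1;
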
diff --git a/mysql-test/suite/engines/funcs/r/rpl_insert_select.result b/mysql-test/suite/engines/funcs/r/rpl_insert_select.result
index 1aff39e0026..3c9b31ded56 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_insert_select.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_insert_select.result
@@ -1,17 +1,21 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
+include/master-slave.inc
+[connection master]
+connection master;
create table t1 (n int not null primary key);
insert into t1 values (1);
create table t2 (n int);
insert into t2 values (1);
insert ignore into t1 select * from t2;
+Warnings:
+Warning 1062 Duplicate entry '1' for key 'PRIMARY'
insert into t1 values (2);
+connection slave;
+connection slave;
select * from t1;
n
1
2
+connection master;
drop table t1,t2;
+connection slave;
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/r/rpl_loaddata2.result b/mysql-test/suite/engines/funcs/r/rpl_loaddata2.result
index 5e3923616ed..d7a9a5981dc 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_loaddata2.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_loaddata2.result
@@ -1,9 +1,5 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
+include/master-slave.inc
+[connection master]
CREATE TABLE t1 (word CHAR(20) NOT NULL);
LOAD DATA INFILE '../../std_data/words.dat' INTO TABLE t1;
SELECT * FROM t1 ORDER BY word;
@@ -78,6 +74,7 @@ Aberdeen
Abernathy
aberrant
aberration
+connection slave;
SELECT * FROM t1 ORDER BY word;
word
Aarhus
@@ -150,4 +147,7 @@ Aberdeen
Abernathy
aberrant
aberration
+connection master;
drop table t1;
+connection slave;
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/r/rpl_loaddata_m.result b/mysql-test/suite/engines/funcs/r/rpl_loaddata_m.result
index 4639c717cea..8e2bc2f0b1c 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_loaddata_m.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_loaddata_m.result
@@ -1,10 +1,7 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
+include/master-slave.inc
+[connection master]
drop database if exists mysqltest;
+connection master;
USE test;
CREATE TABLE t1(a INT, b INT, UNIQUE(b));
LOAD DATA INFILE '../../std_data/rpl_loaddata.dat' INTO TABLE test.t1;
@@ -18,6 +15,7 @@ LOAD DATA INFILE '../../std_data/rpl_loaddata.dat' INTO TABLE mysqltest.t1;
SELECT COUNT(*) FROM mysqltest.t1;
COUNT(*)
2
+connection slave;
SHOW DATABASES;
Database
information_schema
@@ -36,5 +34,8 @@ t1
SELECT COUNT(*) FROM mysqltest.t1;
COUNT(*)
2
+connection master;
DROP DATABASE mysqltest;
-DROP TABLE test.t1;
+DROP TABLE IF EXISTS test.t1;
+connection slave;
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/r/rpl_loaddata_s.result b/mysql-test/suite/engines/funcs/r/rpl_loaddata_s.result
index 8b2da7ff9a1..1eb37dd4899 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_loaddata_s.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_loaddata_s.result
@@ -1,15 +1,16 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
+include/master-slave.inc
+[connection master]
+connection slave;
reset master;
+connection master;
create table test.t1(a int, b int, unique(b));
load data infile '../../std_data/rpl_loaddata.dat' into table test.t1;
+connection slave;
select count(*) from test.t1;
count(*)
2
-show binlog events from 107;
-Log_name Pos Event_type Server_id End_log_pos Info
+include/show_binlog_events.inc
+connection master;
drop table test.t1;
+connection slave;
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/r/rpl_loaddatalocal.result b/mysql-test/suite/engines/funcs/r/rpl_loaddatalocal.result
index 4dd4289b657..f7034cb7968 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_loaddatalocal.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_loaddatalocal.result
@@ -1,31 +1,37 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
+include/master-slave.inc
+[connection master]
create table t1(a int);
select * into outfile 'MYSQLTEST_VARDIR/tmp/rpl_loaddatalocal.select_outfile' from t1;
truncate table t1;
load data local infile 'MYSQLTEST_VARDIR/tmp/rpl_loaddatalocal.select_outfile' into table t1;
+connection slave;
select a,count(*) from t1 group by a;
a count(*)
1 10000
+connection master;
drop table t1;
+connection slave;
+connection master;
create table t1(a int);
insert into t1 values (1), (2), (2), (3);
select * into outfile 'MYSQLTEST_VARDIR/tmp/rpl_loaddatalocal.select_outfile' from t1;
drop table t1;
create table t1(a int primary key);
load data local infile 'MYSQLTEST_VARDIR/tmp/rpl_loaddatalocal.select_outfile' into table t1;
+Warnings:
+Warning 1062 Duplicate entry '2' for key 'PRIMARY'
SELECT * FROM t1 ORDER BY a;
a
1
2
3
+connection slave;
SELECT * FROM t1 ORDER BY a;
a
1
2
3
+connection master;
drop table t1;
+connection slave;
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/r/rpl_loadfile.result b/mysql-test/suite/engines/funcs/r/rpl_loadfile.result
index 7a5a7bc50c9..fb40cbf3220 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_loadfile.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_loadfile.result
@@ -1,9 +1,6 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
+include/master-slave.inc
+[connection master]
+connection master;
DROP PROCEDURE IF EXISTS test.p1;
DROP TABLE IF EXISTS test.t1;
CREATE TABLE test.t1 (a INT, blob_column LONGBLOB, PRIMARY KEY(a));
@@ -119,6 +116,8 @@ Abernathy
aberrant
aberration
+connection slave;
+connection slave;
SELECT * FROM test.t1 ORDER BY blob_column;
a blob_column
1 abase
@@ -223,5 +222,8 @@ Abernathy
aberrant
aberration
+connection master;
DROP PROCEDURE IF EXISTS test.p1;
DROP TABLE test.t1;
+connection slave;
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/r/rpl_log_pos.result b/mysql-test/suite/engines/funcs/r/rpl_log_pos.result
index df4512fa0ea..7f5f34bf831 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_log_pos.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_log_pos.result
@@ -1,46 +1,37 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
+include/master-slave.inc
+[connection master]
+create table if not exists t1 (n int);
+drop table t1;
+call mtr.add_suppression ("Slave I/O: Got fatal error 1236 from master when reading data from binary");
+call mtr.add_suppression ("Error in Log_event::read_log_event");
show master status;
File Position Binlog_Do_DB Binlog_Ignore_DB
-master-bin.000001 107 <Binlog_Ignore_DB>
-show slave status;
-Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master Master_SSL_Verify_Server_Cert Last_IO_Errno Last_IO_Error Last_SQL_Errno Last_SQL_Error Replicate_Ignore_Server_Ids Master_Server_Id
-# 127.0.0.1 root MASTER_PORT 1 master-bin.000001 107 # # master-bin.000001 Yes Yes 0 0 107 # None 0 No # No 0 0 1
-stop slave;
-change master to master_log_pos=107;
-start slave;
-stop slave;
-change master to master_log_pos=107;
-show slave status;
-Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master Master_SSL_Verify_Server_Cert Last_IO_Errno Last_IO_Error Last_SQL_Errno Last_SQL_Error Replicate_Ignore_Server_Ids Master_Server_Id
-# 127.0.0.1 root MASTER_PORT 1 master-bin.000001 107 # # master-bin.000001 No No 0 0 107 # None 0 No # No 0 0 1
-start slave;
-show slave status;
-Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master Master_SSL_Verify_Server_Cert Last_IO_Errno Last_IO_Error Last_SQL_Errno Last_SQL_Error Replicate_Ignore_Server_Ids Master_Server_Id
-# 127.0.0.1 root MASTER_PORT 1 master-bin.000001 107 # # master-bin.000001 Yes Yes 0 0 107 # None 0 No # No 0 0 1
-stop slave;
-change master to master_log_pos=178;
+master-bin.000001 # <Binlog_Do_DB> <Binlog_Ignore_DB>
+connection slave;
+include/stop_slave.inc
+change master to master_log_pos=MASTER_LOG_POS;
start slave;
-show slave status;
-Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master Master_SSL_Verify_Server_Cert Last_IO_Errno Last_IO_Error Last_SQL_Errno Last_SQL_Error Replicate_Ignore_Server_Ids Master_Server_Id
-# 127.0.0.1 root MASTER_PORT 1 master-bin.000001 178 # # master-bin.000001 No Yes 0 0 178 # None 0 No # No 1236 Got fatal error 1236 from master when reading data from binary log: 'Client requested master to start replication from impossible position' 0 1
+include/wait_for_slave_io_error.inc [errno=1236]
+Last_IO_Error = 'Got fatal error 1236 from master when reading data from binary log: 'binlog truncated in the middle of event; consider out of disk space on master; the first event 'master-bin.000001' at XXX, the last event read from 'master-bin.000001' at XXX, the last byte read from 'master-bin.000001' at XXX.''
+include/stop_slave_sql.inc
+connection master;
show master status;
File Position Binlog_Do_DB Binlog_Ignore_DB
-master-bin.000001 107 <Binlog_Ignore_DB>
+master-bin.000001 # <Binlog_Do_DB> <Binlog_Ignore_DB>
create table if not exists t1 (n int);
drop table if exists t1;
create table t1 (n int);
insert into t1 values (1),(2),(3);
-stop slave;
-change master to master_log_pos=207;
+connection slave;
+change master to master_log_pos=MASTER_LOG_POS;
start slave;
select * from t1 ORDER BY n;
n
1
2
3
+connection master;
drop table t1;
+connection slave;
+End of 5.0 tests
+include/rpl_end.inc
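
The rewritten rpl_log_pos.result above no longer hard-codes binlog offsets (107, 178, 207) or a full SHOW SLAVE STATUS dump; it leans on the shared mtr include files instead. A sketch of the new-style pattern the result reflects (the variable name $bogus_pos is illustrative; the actual .test file may use a different one):

    --connection slave
    --source include/stop_slave.inc
    # point the slave at a position the master cannot serve from
    --replace_result $bogus_pos MASTER_LOG_POS
    eval CHANGE MASTER TO MASTER_LOG_POS=$bogus_pos;
    START SLAVE;
    # wait for the I/O thread to report error 1236 instead of matching a
    # fixed SHOW SLAVE STATUS layout
    --let $slave_io_errno= 1236
    --source include/wait_for_slave_io_error.inc
    --source include/stop_slave_sql.inc
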
diff --git a/mysql-test/suite/engines/funcs/r/rpl_many_optimize.result b/mysql-test/suite/engines/funcs/r/rpl_many_optimize.result
index b2148892591..82b1f685ea8 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_many_optimize.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_many_optimize.result
@@ -1,9 +1,7 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
+include/master-slave.inc
+[connection master]
create table t1 (a int not null auto_increment primary key, b int, key(b));
INSERT INTO t1 (a) VALUES (1),(2);
drop table t1;
+connection slave;
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/r/rpl_master_pos_wait.result b/mysql-test/suite/engines/funcs/r/rpl_master_pos_wait.result
deleted file mode 100644
index ab9b8cdad9d..00000000000
--- a/mysql-test/suite/engines/funcs/r/rpl_master_pos_wait.result
+++ /dev/null
@@ -1,18 +0,0 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
-select master_pos_wait('master-bin.999999',0,2);
-master_pos_wait('master-bin.999999',0,2)
--1
-explain extended select master_pos_wait('master-bin.999999',0,2);
-id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL No tables used
-Warnings:
-Note 1003 select master_pos_wait('master-bin.999999',0,2) AS `master_pos_wait('master-bin.999999',0,2)`
-select master_pos_wait('master-bin.999999',0);
-stop slave sql_thread;
-master_pos_wait('master-bin.999999',0)
-NULL
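
The deleted rpl_master_pos_wait.result above is still a compact reference for the MASTER_POS_WAIT() contract that other tests in this suite (rpl_relayspace, for instance) now check through include/assert.inc: run on the slave, the function blocks until the SQL thread has executed past the given binlog position, returns -1 when the optional timeout expires first, and returns NULL when the SQL thread is not running.

    -- On the slave: times out after 2 seconds waiting for a log file that
    -- will never arrive, so the result is -1.
    SELECT MASTER_POS_WAIT('master-bin.999999', 0, 2);
    -- With the SQL thread stopped the call returns NULL instead of waiting.
    STOP SLAVE SQL_THREAD;
    SELECT MASTER_POS_WAIT('master-bin.999999', 0);
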
diff --git a/mysql-test/suite/engines/funcs/r/rpl_misc_functions.result b/mysql-test/suite/engines/funcs/r/rpl_misc_functions.result
index 5003a3e7d65..302cf2351c2 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_misc_functions.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_misc_functions.result
@@ -1,9 +1,6 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
+include/master-slave.inc
+[connection master]
+CALL mtr.add_suppression('Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT');
create table t1(id int, i int, r1 int, r2 int, p varchar(100));
insert into t1 values(1, connection_id(), 0, 0, "");
insert into t1 values(2, 0, rand()*1000, rand()*1000, "");
@@ -13,12 +10,52 @@ delete from t1 where id=6;
set sql_log_bin=1;
insert into t1 values(3, 0, 0, 0, password('does_this_work?'));
insert into t1 values(4, connection_id(), rand()*1000, rand()*1000, password('does_this_still_work?'));
-select * into outfile '../../tmp/rpl_misc_functions.outfile' from t1;
-create table t2 like t1;
-load data local infile 'MYSQLTEST_VARDIR/tmp/rpl_misc_functions.outfile' into table t2;
+select * into outfile 'rpl_misc_functions.outfile' from t1;
+connection slave;
+create temporary table t2 like t1;
+load data local infile 'MYSQLD_DATADIR/test/rpl_misc_functions.outfile' into table t2;
select * from t1, t2 where (t1.id=t2.id) and not(t1.i=t2.i and t1.r1=t2.r1 and t1.r2=t2.r2 and t1.p=t2.p);
id i r1 r2 p id i r1 r2 p
-stop slave;
-drop table t1;
-drop table t2;
+connection master;
drop table t1;
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (id INT NOT NULL AUTO_INCREMENT PRIMARY KEY,
+col_a DOUBLE DEFAULT NULL);
+CREATE PROCEDURE test_replication_sp1()
+BEGIN
+INSERT INTO t1 (col_a) VALUES (rand()), (rand());
+INSERT INTO t1 (col_a) VALUES (rand());
+END|
+CREATE PROCEDURE test_replication_sp2()
+BEGIN
+CALL test_replication_sp1();
+CALL test_replication_sp1();
+END|
+CREATE FUNCTION test_replication_sf() RETURNS DOUBLE DETERMINISTIC
+BEGIN
+RETURN (rand() + rand());
+END|
+CALL test_replication_sp1();
+CALL test_replication_sp2();
+INSERT INTO t1 (col_a) VALUES (test_replication_sf());
+INSERT INTO t1 (col_a) VALUES (test_replication_sf());
+INSERT INTO t1 (col_a) VALUES (test_replication_sf());
+connection slave;
+select * from t1 into outfile "../../tmp/t1_slave.txt";
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
+connection master;
+create temporary table t1_slave select * from t1 where 1=0;
+load data infile '../../tmp/t1_slave.txt' into table t1_slave;
+select count(*) into @aux from t1 join t1_slave using (id)
+where ABS(t1.col_a - t1_slave.col_a) < 0.0000001 ;
+SELECT @aux;
+@aux
+12
+connection master;
+DROP TABLE t1, t1_slave;
+DROP PROCEDURE test_replication_sp1;
+DROP PROCEDURE test_replication_sp2;
+DROP FUNCTION test_replication_sf;
+connection slave;
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/r/rpl_multi_delete.result b/mysql-test/suite/engines/funcs/r/rpl_multi_delete.result
index 4831502eb88..62a0d74225d 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_multi_delete.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_multi_delete.result
@@ -1,9 +1,5 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
+include/master-slave.inc
+[connection master]
create table t1 (a int primary key);
create table t2 (a int);
insert into t1 values (1);
@@ -14,9 +10,13 @@ a
select * from t2;
a
1
+connection slave;
select * from t1;
a
select * from t2;
a
1
+connection master;
drop table t1,t2;
+connection slave;
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/r/rpl_multi_delete2.result b/mysql-test/suite/engines/funcs/r/rpl_multi_delete2.result
index 8e6a6a3d4d0..0a316ec3452 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_multi_delete2.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_multi_delete2.result
@@ -1,13 +1,12 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
+include/master-slave.inc
+[connection master]
+connection master;
set sql_log_bin=0;
create database mysqltest_from;
set sql_log_bin=1;
+connection slave;
create database mysqltest_to;
+connection master;
use mysqltest_from;
drop table if exists a;
CREATE TABLE a (i INT);
@@ -20,10 +19,12 @@ delete alias FROM a alias where alias.i=2;
select * from a;
i
3
+connection slave;
use mysqltest_to;
select * from a;
i
3
+connection master;
create table t1 (a int primary key);
create table t2 (a int);
insert into t1 values (1);
@@ -34,11 +35,15 @@ a
select * from t2;
a
1
+connection slave;
select * from t1;
ERROR 42S02: Table 'mysqltest_to.t1' doesn't exist
select * from t2;
ERROR 42S02: Table 'mysqltest_to.t2' doesn't exist
+connection master;
set sql_log_bin=0;
drop database mysqltest_from;
set sql_log_bin=1;
+connection slave;
drop database mysqltest_to;
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/r/rpl_multi_update4.result b/mysql-test/suite/engines/funcs/r/rpl_multi_update4.result
index f6dde65a35d..256c1c503ab 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_multi_update4.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_multi_update4.result
@@ -1,12 +1,11 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
+include/master-slave.inc
+[connection master]
+connection master;
drop database if exists d1;
drop database if exists d2;
+connection slave;
drop database if exists d2;
+connection master;
create database d1;
create table d1.t0 (id int);
create database d2;
@@ -17,9 +16,13 @@ insert into t1 values (1), (2), (3), (4), (5);
insert into t2 select id + 3 from t1;
update t1 join t2 using (id) set t1.id = 0;
insert into d1.t0 values (0);
+connection slave;
use d1;
select * from t0 where id=0;
id
0
+connection master;
drop database d1;
drop database d2;
+connection slave;
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/r/rpl_ps.result b/mysql-test/suite/engines/funcs/r/rpl_ps.result
index 73c36af4862..57c2db05811 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_ps.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_ps.result
@@ -1,9 +1,5 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
+include/master-slave.inc
+[connection master]
drop table if exists t1;
create table t1(n char(30));
prepare stmt1 from 'insert into t1 values (?)';
@@ -19,12 +15,40 @@ set @var2= 'insert into t1 values (concat("from-var-", ?))';
prepare stmt2 from @var2;
set @var1='from-master-3';
execute stmt2 using @var1;
+connection slave;
SELECT * FROM t1 ORDER BY n;
n
from-master-1
from-master-2-'',
from-var-from-master-3
+connection master;
drop table t1;
+connection slave;
stop slave;
-reset master;
-reset slave;
+include/wait_for_slave_to_stop.inc
+
+########################################################################
+#
+# BUG#25843: Changing default database between PREPARE and EXECUTE of
+# statement breaks binlog.
+#
+########################################################################
+connection slave;
+START SLAVE;
+connection master;
+CREATE DATABASE mysqltest1;
+CREATE TABLE t1(db_name CHAR(32), db_col_name CHAR(32));
+PREPARE stmt_d_1 FROM 'INSERT INTO t1 VALUES(DATABASE(), @@collation_database)';
+EXECUTE stmt_d_1;
+use mysqltest1;
+EXECUTE stmt_d_1;
+connection slave;
+SELECT * FROM t1;
+db_name db_col_name
+test latin1_swedish_ci
+test latin1_swedish_ci
+connection master;
+DROP DATABASE mysqltest1;
+use test;
+DROP TABLE t1;
+include/rpl_end.inc
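
The BUG#25843 block added to rpl_ps.result above boils down to the following statement sequence; the closing comment only restates the slave output recorded in the result file:

    CREATE DATABASE mysqltest1;
    CREATE TABLE t1 (db_name CHAR(32), db_col_name CHAR(32));
    PREPARE stmt_d_1 FROM 'INSERT INTO t1 VALUES(DATABASE(), @@collation_database)';
    EXECUTE stmt_d_1;   -- default database is still `test`
    USE mysqltest1;
    EXECUTE stmt_d_1;   -- default database changed between PREPARE and EXECUTE
    -- The slave's copy of t1 reports db_name = 'test' and latin1_swedish_ci
    -- for both rows, i.e. both executions replicate against the database that
    -- was current when the statement was prepared.
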
diff --git a/mysql-test/suite/engines/funcs/r/rpl_rbr_to_sbr.result b/mysql-test/suite/engines/funcs/r/rpl_rbr_to_sbr.result
deleted file mode 100644
index 13f7fbfbdeb..00000000000
--- a/mysql-test/suite/engines/funcs/r/rpl_rbr_to_sbr.result
+++ /dev/null
@@ -1,55 +0,0 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
-**** On Master ****
-CREATE TABLE t1 (a INT, b LONG);
-INSERT INTO t1 VALUES (1,1), (2,2);
-INSERT INTO t1 VALUES (3,UUID()), (4,UUID());
-SHOW BINLOG EVENTS;
-**** On Slave ****
-SHOW SLAVE STATUS;
-Slave_IO_State #
-Master_Host 127.0.0.1
-Master_User root
-Master_Port MASTER_PORT
-Connect_Retry 1
-Master_Log_File master-bin.000001
-Read_Master_Log_Pos #
-Relay_Log_File #
-Relay_Log_Pos #
-Relay_Master_Log_File master-bin.000001
-Slave_IO_Running Yes
-Slave_SQL_Running Yes
-Replicate_Do_DB
-Replicate_Ignore_DB
-Replicate_Do_Table
-Replicate_Ignore_Table
-Replicate_Wild_Do_Table
-Replicate_Wild_Ignore_Table
-Last_Errno 0
-Last_Error
-Skip_Counter 0
-Exec_Master_Log_Pos #
-Relay_Log_Space #
-Until_Condition None
-Until_Log_File
-Until_Log_Pos 0
-Master_SSL_Allowed No
-Master_SSL_CA_File
-Master_SSL_CA_Path
-Master_SSL_Cert
-Master_SSL_Cipher
-Master_SSL_Key
-Seconds_Behind_Master #
-Master_SSL_Verify_Server_Cert No
-Last_IO_Errno 0
-Last_IO_Error
-Last_SQL_Errno 0
-Last_SQL_Error
-Replicate_Ignore_Server_Ids
-Master_Server_Id 1
-SHOW BINLOG EVENTS;
-DROP TABLE IF EXISTS t1;
diff --git a/mysql-test/suite/engines/funcs/r/rpl_relayspace.result b/mysql-test/suite/engines/funcs/r/rpl_relayspace.result
index 1f2a739d3e3..90bf18aaa88 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_relayspace.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_relayspace.result
@@ -1,19 +1,18 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
-stop slave;
+include/master-slave.inc
+[connection master]
+connection slave;
+include/stop_slave.inc
+connection master;
create table t1 (a int);
drop table t1;
create table t1 (a int);
drop table t1;
+connection slave;
reset slave;
start slave io_thread;
-stop slave io_thread;
+include/wait_for_slave_param.inc [Slave_IO_State]
+include/stop_slave_io.inc
reset slave;
-start slave;
-select master_pos_wait('master-bin.001',200,6)=-1;
-master_pos_wait('master-bin.001',200,6)=-1
-0
+include/start_slave.inc
+include/assert.inc [Assert that master_pos_wait does not timeout nor it returns NULL]
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/r/rpl_replicate_ignore_db.result b/mysql-test/suite/engines/funcs/r/rpl_replicate_ignore_db.result
index 0135804c02d..1192f9860f2 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_replicate_ignore_db.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_replicate_ignore_db.result
@@ -1,9 +1,5 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
+include/master-slave.inc
+[connection master]
drop database if exists mysqltest1;
drop database if exists mysqltest2;
create database mysqltest1;
@@ -11,13 +7,19 @@ create database mysqltest2;
use mysqltest1;
create table t1 (a int);
insert into t1 values(1);
+connection slave;
select * from mysqltest1.t1;
ERROR 42S02: Table 'mysqltest1.t1' doesn't exist
+connection master;
use mysqltest2;
create table t1 (a int);
insert into t1 values(1);
+connection slave;
select * from mysqltest2.t1;
a
1
+connection master;
drop database mysqltest1;
drop database mysqltest2;
+connection slave;
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/r/rpl_row_NOW.result b/mysql-test/suite/engines/funcs/r/rpl_row_NOW.result
index 23f95878608..480ad34ce69 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_row_NOW.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_row_NOW.result
@@ -1,9 +1,6 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
+include/master-slave.inc
+[connection master]
+connection master;
create database if not exists mysqltest1;
DROP TABLE IF EXISTS mysqltest1.t1;
CREATE TABLE mysqltest1.t1 (n MEDIUMINT NOT NULL AUTO_INCREMENT,
@@ -24,6 +21,10 @@ FOR EACH ROW BEGIN
SET new.b = mysqltest1.f1();
END|
INSERT INTO mysqltest1.t1 SET n = NULL, a = now();
+connection slave;
+connection master;
DROP TABLE IF EXISTS mysqltest1.t1;
DROP FUNCTION mysqltest1.f1;
DROP DATABASE mysqltest1;
+connection slave;
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/r/rpl_row_USER.result b/mysql-test/suite/engines/funcs/r/rpl_row_USER.result
index c4774665307..2771c674f44 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_row_USER.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_row_USER.result
@@ -1,15 +1,15 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
+include/master-slave.inc
+[connection master]
+set local sql_mode="";
+connection master;
DROP DATABASE IF EXISTS mysqltest1;
CREATE DATABASE mysqltest1;
CREATE USER tester IDENTIFIED BY 'test';
GRANT ALL ON mysqltest1.* TO 'tester'@'%' IDENTIFIED BY 'test';
GRANT ALL ON mysqltest1.* TO ''@'localhost%';
FLUSH PRIVILEGES;
+connect m_1,localhost,tester,,mysqltest1;
+connection m_1;
CREATE TABLE mysqltest1.t1 (a INT, users VARCHAR(255), PRIMARY KEY(a));
INSERT INTO mysqltest1.t1 VALUES(1,USER());
INSERT INTO mysqltest1.t1 VALUES(2,CURRENT_USER());
@@ -19,21 +19,26 @@ INSERT INTO mysqltest1.t1 VALUES(3,USER());
INSERT INTO mysqltest1.t1 VALUES(4,CURRENT_USER());
end|
CALL mysqltest1.p1();
+connection master;
SELECT * FROM mysqltest1.t1 ORDER BY a;
a users
1 tester@localhost
2 @localhost%
3 tester@localhost
4 @localhost%
+connection slave;
SELECT * FROM mysqltest1.t1 ORDER BY a;
a users
1 tester@localhost
2 @localhost%
3 tester@localhost
4 @localhost%
+connection master;
+DROP DATABASE mysqltest1;
REVOKE ALL ON mysqltest1.* FROM 'tester'@'%';
REVOKE ALL ON mysqltest1.* FROM ''@'localhost%';
-DROP DATABASE mysqltest1;
-DROP USER 'tester';
+DROP USER tester@'%';
DROP USER ''@'localhost%';
FLUSH PRIVILEGES;
+connection slave;
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/r/rpl_row_drop.result b/mysql-test/suite/engines/funcs/r/rpl_row_drop.result
index d45bbadf7df..8753764e81e 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_row_drop.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_row_drop.result
@@ -1,10 +1,6 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
-**** On Master ****
+include/master-slave.inc
+[connection master]
+connection master;
CREATE TABLE t1 (a int);
CREATE TABLE t2 (a int);
CREATE TEMPORARY TABLE t2 (a int, b int);
@@ -12,47 +8,48 @@ SHOW TABLES;
Tables_in_test
t1
t2
-**** On Slave ****
+connection slave;
SHOW TABLES;
Tables_in_test
t1
t2
-**** On Master ****
+connection master;
DROP TABLE t2;
SHOW TABLES;
Tables_in_test
t1
t2
-**** On Slave ****
+connection slave;
SHOW TABLES;
Tables_in_test
t1
t2
-**** On Master ****
+connection master;
CREATE TEMPORARY TABLE t2 (a int, b int);
SHOW TABLES;
Tables_in_test
t1
t2
-**** On Slave ****
+connection slave;
SHOW TABLES;
Tables_in_test
t1
t2
-**** On Master ****
+connection master;
DROP TABLE t1,t2;
-SHOW BINLOG EVENTS;
+include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
-master-bin.000001 4 Format_desc 1 107 Server ver: VERSION, Binlog ver: 4
-master-bin.000001 107 Query 1 193 use `test`; CREATE TABLE t1 (a int)
-master-bin.000001 193 Query 1 279 use `test`; CREATE TABLE t2 (a int)
-master-bin.000001 279 Query 1 403 use `test`; DROP TEMPORARY TABLE IF EXISTS `t2` /* generated by server */
-master-bin.000001 403 Query 1 527 use `test`; DROP TEMPORARY TABLE IF EXISTS `t2` /* generated by server */
-master-bin.000001 527 Query 1 631 use `test`; DROP TABLE `t1` /* generated by server */
+master-bin.000001 # Gtid # # GTID #-#-#
+master-bin.000001 # Query # # use `test`; CREATE TABLE t1 (a int)
+master-bin.000001 # Gtid # # GTID #-#-#
+master-bin.000001 # Query # # use `test`; CREATE TABLE t2 (a int)
+master-bin.000001 # Gtid # # GTID #-#-#
+master-bin.000001 # Query # # use `test`; DROP TABLE `t1` /* generated by server */
SHOW TABLES;
Tables_in_test
t2
-**** On Slave ****
+connection slave;
SHOW TABLES;
Tables_in_test
t2
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/r/rpl_row_func001.result b/mysql-test/suite/engines/funcs/r/rpl_row_func001.result
index b20f3f724d0..ae05b5cf3c0 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_row_func001.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_row_func001.result
@@ -1,9 +1,6 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
+include/master-slave.inc
+[connection master]
+connection master;
DROP FUNCTION test.f1;
DROP TABLE IF EXISTS test.t1;
create table test.t1 (a int, PRIMARY KEY(a));
@@ -22,9 +19,14 @@ select * from test.t1;
a
1
2
+connection slave;
+connection slave;
select * from test.t1;
a
1
2
+connection master;
DROP FUNCTION test.f1;
DROP TABLE test.t1;
+connection slave;
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/r/rpl_row_inexist_tbl.result b/mysql-test/suite/engines/funcs/r/rpl_row_inexist_tbl.result
index aff54e01b95..e2bd199ecab 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_row_inexist_tbl.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_row_inexist_tbl.result
@@ -1,14 +1,12 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
+include/master-slave.inc
+[connection master]
+connection master;
create table t1 (a int not null primary key);
insert into t1 values (1);
create table t2 (a int);
insert into t2 values (1);
update t1, t2 set t1.a = 0 where t1.a = t2.a;
+connection slave;
show tables;
Tables_in_test
t1
@@ -16,46 +14,14 @@ select * from t1;
a
0
drop table t1;
+connection master;
insert into t1 values (1);
-show slave status;
-Slave_IO_State #
-Master_Host 127.0.0.1
-Master_User root
-Master_Port MASTER_MYPORT
-Connect_Retry 1
-Master_Log_File master-bin.000001
-Read_Master_Log_Pos #
-Relay_Log_File #
-Relay_Log_Pos #
-Relay_Master_Log_File master-bin.000001
-Slave_IO_Running Yes
-Slave_SQL_Running No
-Replicate_Do_DB
-Replicate_Ignore_DB
-Replicate_Do_Table
-Replicate_Ignore_Table test.t2
-Replicate_Wild_Do_Table
-Replicate_Wild_Ignore_Table
-Last_Errno 1146
-Last_Error Error executing row event: 'Table 'test.t1' doesn't exist'
-Skip_Counter 0
-Exec_Master_Log_Pos #
-Relay_Log_Space #
-Until_Condition None
-Until_Log_File
-Until_Log_Pos 0
-Master_SSL_Allowed No
-Master_SSL_CA_File
-Master_SSL_CA_Path
-Master_SSL_Cert
-Master_SSL_Cipher
-Master_SSL_Key
-Seconds_Behind_Master #
-Master_SSL_Verify_Server_Cert No
-Last_IO_Errno 0
-Last_IO_Error
-Last_SQL_Errno 1146
-Last_SQL_Error Error executing row event: 'Table 'test.t1' doesn't exist'
-Replicate_Ignore_Server_Ids
-Master_Server_Id 1
+connection slave;
+call mtr.add_suppression("Slave SQL.*Error executing row event: .Table .test.t1. doesn.t exist., error.* 1146");
+include/wait_for_slave_sql_error.inc [errno=1146]
+==== Clean up ====
+include/stop_slave_io.inc
+RESET SLAVE;
+connection master;
drop table t1, t2;
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/r/rpl_row_max_relay_size.result b/mysql-test/suite/engines/funcs/r/rpl_row_max_relay_size.result
index 547dd8e1541..41d18d7f9fe 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_row_max_relay_size.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_row_max_relay_size.result
@@ -1,20 +1,19 @@
+include/master-slave.inc
+[connection master]
+connection slave;
stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
-stop slave;
+connection master;
#
# Generate a big enough master's binlog to cause relay log rotations
#
create table t1 (a int);
drop table t1;
+connection slave;
reset slave;
#
# Test 1
#
-set @my_max_binlog_size= @@global.max_binlog_size;
+set @my_max_binlog_size= @@global.max_binlog_size, @my_max_relay_log_size=@@global.max_relay_log_size;
set global max_binlog_size=8192;
set global max_relay_log_size=8192-1;
Warnings:
@@ -23,7 +22,7 @@ select @@global.max_relay_log_size;
@@global.max_relay_log_size
4096
start slave;
-Checking that both slave threads are running.
+include/check_slave_is_running.inc
#
# Test 2
#
@@ -33,17 +32,19 @@ set global max_relay_log_size=(5*4096);
select @@global.max_relay_log_size;
@@global.max_relay_log_size 20480
start slave;
-Checking that both slave threads are running.
+include/check_slave_is_running.inc
#
# Test 3: max_relay_log_size = 0
#
stop slave;
reset slave;
set global max_relay_log_size=0;
+Warnings:
+Warning 1292 Truncated incorrect max_relay_log_size value: '0'
select @@global.max_relay_log_size;
-@@global.max_relay_log_size 0
+@@global.max_relay_log_size 4096
start slave;
-Checking that both slave threads are running.
+include/check_slave_is_running.inc
#
# Test 4: Tests below are mainly to ensure that we have not coded with wrong assumptions
#
@@ -56,19 +57,27 @@ flush logs;
reset slave;
start slave;
flush logs;
+connection master;
create table t1 (a int);
-Checking that both slave threads are running.
+connection slave;
+include/check_slave_is_running.inc
#
# Test 6: one more rotation, to be sure Relay_Log_Space is correctly updated
#
flush logs;
+connection master;
drop table t1;
-Checking that both slave threads are running.
+connection slave;
+include/check_slave_is_running.inc
+connection master;
flush logs;
show master status;
File Position Binlog_Do_DB Binlog_Ignore_DB
master-bin.000002 # <Binlog_Do_DB> <Binlog_Ignore_DB>
+connection slave;
set global max_binlog_size= @my_max_binlog_size;
+set global max_relay_log_size= @my_max_relay_log_size;
#
# End of 4.1 tests
#
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/r/rpl_row_reset_slave.result b/mysql-test/suite/engines/funcs/r/rpl_row_reset_slave.result
index 5499d41bc1f..b9e98d5a97d 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_row_reset_slave.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_row_reset_slave.result
@@ -1,41 +1,58 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
-Master_User root
-Master_Host 127.0.0.1
+include/master-slave.inc
+[connection master]
+connection slave;
+Master_User = 'root'
+Master_Host = '127.0.0.1'
include/stop_slave.inc
change master to master_user='test';
-Master_User test
-Master_Host 127.0.0.1
+Master_User = 'test'
+Master_Host = '127.0.0.1'
reset slave;
-Master_User test
-Master_Host 127.0.0.1
+Master_User = 'test'
+Master_Host = '127.0.0.1'
change master to master_user='root';
include/start_slave.inc
-Master_User root
-Master_Host 127.0.0.1
+Master_User = 'root'
+Master_Host = '127.0.0.1'
include/stop_slave.inc
reset slave;
include/start_slave.inc
+connection master;
create temporary table t1 (a int);
+connection slave;
include/stop_slave.inc
reset slave;
include/start_slave.inc
show status like 'slave_open_temp_tables';
Variable_name Value
Slave_open_temp_tables 0
+connection master;
+drop temporary table if exists t1;
+connection slave;
include/stop_slave.inc
reset slave;
+include/check_slave_no_error.inc
change master to master_user='impossible_user_name';
start slave;
-stop slave;
+include/wait_for_slave_io_error.inc [errno=1045]
+include/stop_slave_sql.inc
change master to master_user='root';
include/start_slave.inc
+include/check_slave_no_error.inc
stop slave;
change master to master_user='impossible_user_name';
start slave;
-stop slave;
+include/wait_for_slave_io_error.inc [errno=1045]
+include/stop_slave_sql.inc
reset slave;
+include/check_slave_no_error.inc
+change master to master_user='root';
+reset slave;
+include/start_slave.inc
+include/stop_slave.inc
+reset slave all;
+start slave;
+ERROR HY000: Misconfigured slave: MASTER_HOST was not set; Fix in config file or with CHANGE MASTER TO
+CHANGE MASTER TO MASTER_HOST= 'MASTER_HOST', MASTER_USER= 'MASTER_USER', MASTER_PORT= MASTER_PORT;
+include/start_slave.inc
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/r/rpl_row_sp001.result b/mysql-test/suite/engines/funcs/r/rpl_row_sp001.result
index 8c26c061376..d37425c43a8 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_row_sp001.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_row_sp001.result
@@ -1,9 +1,5 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
+include/master-slave.inc
+[connection master]
<Begin test section 1 (non deterministic SP)>
---------------------------------------------
@@ -34,12 +30,14 @@ a t
2 NEW
3 NEW
4 NEW
+connection slave;
SELECT * FROM t2 ORDER BY a;
a t
1 NEW
2 NEW
3 NEW
4 NEW
+connection master;
call test.p2(1);
SELECT * FROM t2 ORDER BY a;
a t
@@ -47,12 +45,14 @@ a t
2 Tex
3 Tex
4 Tex
+connection slave;
SELECT * FROM t2 ORDER BY a;
a t
1 Tex
2 Tex
3 Tex
4 Tex
+connection master;
call test.p2(2);
SELECT * FROM t2 ORDER BY a;
a t
@@ -60,12 +60,14 @@ a t
2 SQL
3 SQL
4 SQL
+connection slave;
SELECT * FROM t2 ORDER BY a;
a t
1 SQL
2 SQL
3 SQL
4 SQL
+connection master;
call test.p2(3);
SELECT * FROM t2 ORDER BY a;
a t
@@ -73,13 +75,17 @@ a t
2 NONE
3 NONE
4 NONE
+connection slave;
SELECT * FROM t2 ORDER BY a;
a t
1 NONE
2 NONE
3 NONE
4 NONE
+connection master;
DROP PROCEDURE test.p1;
DROP PROCEDURE test.p2;
DROP TABLE test.t1;
DROP TABLE test.t2;
+connection slave;
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/r/rpl_row_sp005.result b/mysql-test/suite/engines/funcs/r/rpl_row_sp005.result
index 58c53b394b2..8acc2e20202 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_row_sp005.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_row_sp005.result
@@ -1,9 +1,6 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
+include/master-slave.inc
+[connection master]
+connection master;
DROP PROCEDURE IF EXISTS test.p1;
DROP PROCEDURE IF EXISTS test.p2;
DROP TABLE IF EXISTS test.t2;
@@ -42,9 +39,6 @@ INSERT INTO test.t1 VALUES (4,'MySQL'),(20,'ROCKS'),(11,'Texas'),(10,'kyle');
INSERT INTO test.t2 VALUES (4),(2),(1),(3);
UPDATE test.t1 SET id=id+4 WHERE id=4;
END|
-
-< ---- Master selects-- >
--------------------------
CALL test.p2();
SELECT * FROM test.t1 ORDER BY id;
id data
@@ -58,9 +52,7 @@ id2
2
3
4
-
-< ---- Slave selects-- >
-------------------------
+connection slave;
SELECT * FROM test.t1 ORDER BY id;
id data
8 MySQL
@@ -73,30 +65,28 @@ id2
2
3
4
-
-< ---- Master selects-- >
--------------------------
+connection master;
CALL test.p1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM test.t3 ORDER BY id3;
id3 c
1 MySQL
2 kyle
3 Texas
4 ROCKS
-
-< ---- Slave selects-- >
-------------------------
+connection slave;
SELECT * FROM test.t3 ORDER BY id3;
id3 c
1 MySQL
2 kyle
3 Texas
4 ROCKS
+connection master;
ALTER PROCEDURE test.p1 MODIFIES SQL DATA;
+connection master;
DROP PROCEDURE IF EXISTS test.p1;
DROP PROCEDURE IF EXISTS test.p2;
DROP TABLE IF EXISTS test.t1;
DROP TABLE IF EXISTS test.t2;
DROP TABLE IF EXISTS test.t3;
+connection slave;
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/r/rpl_row_sp008.result b/mysql-test/suite/engines/funcs/r/rpl_row_sp008.result
index 23197964a24..481c9b3f6cc 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_row_sp008.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_row_sp008.result
@@ -1,9 +1,6 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
+include/master-slave.inc
+[connection master]
+connection master;
DROP PROCEDURE IF EXISTS test.p1;
DROP TABLE IF EXISTS test.t2;
CREATE TABLE test.t1 (a INT,PRIMARY KEY(a));
@@ -14,21 +11,19 @@ BEGIN
SELECT SQL_CALC_FOUND_ROWS * FROM test.t1 LIMIT 1;
INSERT INTO test.t2 VALUES(FOUND_ROWS());
END|
-
-< ---- Master selects-- >
--------------------------
CALL test.p1();
a
1
SELECT * FROM test.t2;
a
2
-
-< ---- Slave selects-- >
-------------------------
+connection slave;
SELECT * FROM test.t2;
a
2
+connection master;
DROP PROCEDURE IF EXISTS test.p1;
DROP TABLE IF EXISTS test.t1;
DROP TABLE IF EXISTS test.t2;
+connection slave;
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/r/rpl_row_sp009.result b/mysql-test/suite/engines/funcs/r/rpl_row_sp009.result
index 35ce0d7b420..d4bcfe9fcb6 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_row_sp009.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_row_sp009.result
@@ -1,9 +1,6 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
+include/master-slave.inc
+[connection master]
+connection master;
DROP PROCEDURE IF EXISTS test.p1;
DROP TABLE IF EXISTS test.t1;
DROP TABLE IF EXISTS test.t2;
@@ -53,25 +50,33 @@ SELECT * FROM test.t2 ORDER BY a;
a
1
3
+connection slave;
SELECT * FROM test.t2 ORDER BY a;
a
1
3
+connection master;
truncate test.t2;
call test.p1('b');
select * from test.t2 ORDER BY a;
a
2
4
+connection slave;
SELECT * FROM test.t2 ORDER BY a;
a
2
4
+connection master;
truncate test.t2;
SELECT * FROM test.t2 ORDER BY a;
a
+connection slave;
SELECT * FROM test.t2 ORDER BY a;
a
+connection master;
DROP PROCEDURE test.p1;
DROP TABLE test.t1;
DROP TABLE test.t2;
+connection slave;
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/r/rpl_row_sp010.result b/mysql-test/suite/engines/funcs/r/rpl_row_sp010.result
index 02567465428..6a15298eddf 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_row_sp010.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_row_sp010.result
@@ -1,9 +1,6 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
+include/master-slave.inc
+[connection master]
+connection master;
DROP PROCEDURE IF EXISTS test.p1;
DROP PROCEDURE IF EXISTS test.p2;
DROP PROCEDURE IF EXISTS test.p3;
@@ -26,8 +23,10 @@ SELECT * FROM test.t1 ORDER BY a;
a
1
2
+connection slave;
show tables;
Tables_in_test
+connection master;
CREATE PROCEDURE test.p3()
BEGIN
INSERT INTO test.t2 VALUES(7);
@@ -44,13 +43,17 @@ SELECT * FROM test.t2 ORDER BY a;
a
6
7
+connection slave;
SELECT * FROM test.t2 ORDER BY a;
a
6
7
+connection master;
DROP PROCEDURE IF EXISTS test.p1;
DROP PROCEDURE IF EXISTS test.p2;
DROP PROCEDURE IF EXISTS test.p3;
DROP PROCEDURE IF EXISTS test.p4;
DROP TABLE IF EXISTS test.t1;
DROP TABLE IF EXISTS test.t2;
+connection slave;
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/r/rpl_row_sp011.result b/mysql-test/suite/engines/funcs/r/rpl_row_sp011.result
index e35c9f21adb..53a9a964b53 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_row_sp011.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_row_sp011.result
@@ -1,9 +1,6 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
+include/master-slave.inc
+[connection master]
+connection master;
DROP PROCEDURE IF EXISTS test.p1;
DROP PROCEDURE IF EXISTS test.p2;
DROP PROCEDURE IF EXISTS test.p3;
@@ -58,6 +55,8 @@ ALTER TABLE test.t2 DROP COLUMN to_drop;
INSERT INTO test.t2 VALUES ('gone',NULL,'STM',RAND());
END|
CALL test.p1();
+connection slave;
+connection master;
DROP PROCEDURE IF EXISTS test.p1;
DROP PROCEDURE IF EXISTS test.p2;
DROP PROCEDURE IF EXISTS test.p3;
@@ -67,3 +66,5 @@ DROP PROCEDURE IF EXISTS test.p6;
DROP PROCEDURE IF EXISTS test.p7;
DROP TABLE IF EXISTS test.t1;
DROP TABLE IF EXISTS test.t2;
+connection slave;
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/r/rpl_row_sp012.result b/mysql-test/suite/engines/funcs/r/rpl_row_sp012.result
index 4aa16cbf6bd..65cc566256a 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_row_sp012.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_row_sp012.result
@@ -1,12 +1,10 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
+include/master-slave.inc
+[connection master]
+connection master;
DROP PROCEDURE IF EXISTS test.p1;
DROP PROCEDURE IF EXISTS test.p2;
DROP PROCEDURE IF EXISTS test.p3;
+create user user1@localhost;
grant usage on *.* to user1@localhost;
flush privileges;
SELECT CURRENT_USER();
@@ -21,9 +19,10 @@ CREATE PROCEDURE test.p3 () SQL SECURITY INVOKER CALL test.p1();
GRANT EXECUTE ON PROCEDURE p1 TO user1@localhost;
GRANT EXECUTE ON PROCEDURE p2 TO user1@localhost;
GRANT EXECUTE ON PROCEDURE p3 TO user1@localhost;
-
-<******** Master user1 p3 & p2 calls *******>
-----------------------------------------------
+set sql_mode=default;
+connection slave;
+connect muser1,localhost,user1,,;
+connection muser1;
SELECT CURRENT_USER();
CURRENT_USER()
user1@localhost
@@ -36,9 +35,9 @@ user1@localhost user1@localhost
CALL test.p2();
CURRENT_USER() USER()
root@localhost user1@localhost
-
-<******** Slave user1 p3 & p2 calls *******>
----------------------------------------------
+connect suser1,127.0.0.1,user1,,test,$SLAVE_MYPORT,;
+connection master;
+connection suser1;
SELECT CURRENT_USER();
CURRENT_USER()
user1@localhost
@@ -51,9 +50,12 @@ user1@localhost user1@localhost
CALL test.p2();
CURRENT_USER() USER()
root@localhost user1@localhost
+connection master;
DROP PROCEDURE IF EXISTS test.p1;
DROP PROCEDURE IF EXISTS test.p3;
DROP PROCEDURE IF EXISTS test.p2;
DROP TABLE IF EXISTS test.t1;
DROP TABLE IF EXISTS test.t2;
-DROP USER 'user1'@'localhost';
+DROP USER user1@localhost;
+connection slave;
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/r/rpl_row_stop_middle.result b/mysql-test/suite/engines/funcs/r/rpl_row_stop_middle.result
index 46ca5748174..07be1bfebfe 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_row_stop_middle.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_row_stop_middle.result
@@ -1,11 +1,10 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
+include/master-slave.inc
+[connection master]
+connection master;
create table t1 (a int not null auto_increment primary key, b int, key(b));
-stop slave;
+connection slave;
+include/stop_slave.inc
+connection master;
INSERT INTO t1 (a) VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10);
INSERT INTO t1 (a) SELECT null FROM t1;
INSERT INTO t1 (a) SELECT null FROM t1;
@@ -20,7 +19,9 @@ INSERT INTO t1 (a) SELECT null FROM t1;
INSERT INTO t1 (a) SELECT null FROM t1;
INSERT INTO t1 (a) SELECT null FROM t1;
INSERT INTO t1 (a) SELECT null FROM t1;
-start slave;
-stop slave;
+connection slave;
+include/start_slave.inc
+include/stop_slave.inc
drop table t1;
+connection master;
drop table t1;
diff --git a/mysql-test/suite/engines/funcs/r/rpl_row_trig001.result b/mysql-test/suite/engines/funcs/r/rpl_row_trig001.result
index 6665dc6d555..142c6c61865 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_row_trig001.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_row_trig001.result
@@ -1,14 +1,10 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
+include/master-slave.inc
+[connection master]
CREATE TABLE test.t1 (n MEDIUMINT NOT NULL, d DATETIME, PRIMARY KEY(n));
CREATE TABLE test.t2 (n MEDIUMINT NOT NULL AUTO_INCREMENT, f FLOAT, d DATETIME, PRIMARY KEY(n));
CREATE TABLE test.t3 (n MEDIUMINT NOT NULL AUTO_INCREMENT, d DATETIME, PRIMARY KEY(n));
INSERT INTO test.t1 VALUES (1,NOW());
-CREATE TRIGGER test.t2_ai AFTER INSERT ON test.t2 FOR EACH ROW UPDATE test.t1 SET d=NOW() where n = 1;//
+CREATE TRIGGER test.t2_ai AFTER INSERT ON test.t2 FOR EACH ROW UPDATE test.t1 SET d=NOW() where n = 1//
CREATE PROCEDURE test.p3()
BEGIN
INSERT INTO test.t3 (d) VALUES (NOW());
@@ -18,9 +14,15 @@ CREATE PROCEDURE test.p2()
BEGIN
INSERT INTO test.t2 (f,d) VALUES (RAND(),NOW());
END//
+connection slave;
+connection master;
+connection master;
+INSERT INTO test.t1 VALUES (1+1, NOW());
+connection slave;
<End test section 2 (Tiggers & SP)>
-----------------------------------
+connection master;
DROP PROCEDURE test.p2;
DROP PROCEDURE test.p3;
DROP TRIGGER test.t2_ai;
@@ -28,3 +30,5 @@ DROP TRIGGER test.t3_bi_t2;
DROP TABLE test.t1;
DROP TABLE test.t2;
DROP TABLE test.t3;
+connection slave;
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/r/rpl_row_trig002.result b/mysql-test/suite/engines/funcs/r/rpl_row_trig002.result
index 794104db750..8259edd56df 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_row_trig002.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_row_trig002.result
@@ -1,9 +1,6 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
+include/master-slave.inc
+[connection master]
+connection master;
DROP TRIGGER test.t2_ai;
DROP TABLE IF EXISTS test.t1;
DROP TABLE IF EXISTS test.t2;
@@ -20,6 +17,7 @@ id domain
3 earthmotherwear.com
4 yahoo.com
5 example.com
+connection slave;
SELECT * FROM test.t1 ORDER BY id;
id domain
1 example.com
@@ -27,6 +25,7 @@ id domain
3 earthmotherwear.com
4 yahoo.com
5 example.com
+connection master;
INSERT INTO test.t3 VALUES ('Yes', 5, NULL, 'spamfilter','scan_incoming');
INSERT INTO test.t3 VALUES ('Yes', 1, NULL, 'spamfilter','scan_incoming');
INSERT INTO test.t2 VALUES ('Yes', 1, NULL, 'spamfilter','scan_incoming');
@@ -37,6 +36,7 @@ select * from test.t3;
value domain_id mailaccount_id program keey
No 5 NULL spamfilter scan_incoming
Yes 1 NULL spamfilter scan_incoming
+connection slave;
select * from test.t2;
value domain_id mailaccount_id program keey
Yes 1 NULL spamfilter scan_incoming
@@ -44,6 +44,7 @@ select * from test.t3;
value domain_id mailaccount_id program keey
No 5 NULL spamfilter scan_incoming
Yes 1 NULL spamfilter scan_incoming
+connection master;
DELETE FROM test.t1 WHERE id = 1;
SELECT * FROM test.t1 ORDER BY id;
id domain
@@ -51,19 +52,24 @@ id domain
3 earthmotherwear.com
4 yahoo.com
5 example.com
+connection master;
SELECT * FROM test.t1 ORDER BY id;
id domain
2 mysql.com
3 earthmotherwear.com
4 yahoo.com
5 example.com
+connection slave;
SELECT * FROM test.t1 ORDER BY id;
id domain
2 mysql.com
3 earthmotherwear.com
4 yahoo.com
5 example.com
+connection master;
DROP TRIGGER test.t2_ai;
DROP TABLE test.t1;
DROP TABLE test.t2;
DROP TABLE test.t3;
+connection slave;
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/r/rpl_row_trig003.result b/mysql-test/suite/engines/funcs/r/rpl_row_trig003.result
index 131af933b41..239d5917bc5 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_row_trig003.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_row_trig003.result
@@ -1,9 +1,6 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
+include/master-slave.inc
+[connection master]
+connection master;
DROP TRIGGER test.t1_bi;
DROP TRIGGER test.t2_ai;
DROP TRIGGER test.t1_bu;
@@ -69,15 +66,11 @@ INSERT INTO test.t2 VALUES(NULL,0,'Testing MySQL databases is a cool ', 'MySQL C
UPDATE test.t1 SET b1 = 0 WHERE b1 = 1;
INSERT INTO test.t2 VALUES(NULL,1,'This is an after update test.', 'If this works, total will not be zero on the master or slave',1.4321,5.221,0,YEAR(NOW()),NOW());
UPDATE test.t2 SET b1 = 0 WHERE b1 = 1;
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
INSERT INTO test.t1 VALUES(NULL,1,'add some more test data test.', 'and hope for the best', 3.321,5.221,0,YEAR(NOW()),NOW());
DELETE FROM test.t1 WHERE id = 1;
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
DELETE FROM test.t2 WHERE id = 1;
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
+connection slave;
+connection master;
DROP TRIGGER test.t1_bi;
DROP TRIGGER test.t2_ai;
DROP TRIGGER test.t1_bu;
@@ -87,3 +80,5 @@ DROP TRIGGER test.t2_ad;
DROP TABLE IF EXISTS test.t1;
DROP TABLE IF EXISTS test.t2;
DROP TABLE IF EXISTS test.t3;
+connection slave;
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/r/rpl_row_until.result b/mysql-test/suite/engines/funcs/r/rpl_row_until.result
index 457d83496bc..82268ce72eb 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_row_until.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_row_until.result
@@ -6,15 +6,14 @@ INSERT INTO t1 VALUES (1),(2),(3),(4);
DROP TABLE t1;
CREATE TABLE t2(n INT NOT NULL AUTO_INCREMENT PRIMARY KEY);
INSERT INTO t2 VALUES (1),(2);
-connection slave;
-connection master;
INSERT INTO t2 VALUES (3),(4);
DROP TABLE t2;
connection slave;
include/stop_slave.inc
RESET SLAVE;
+CHANGE MASTER TO MASTER_USER='root', MASTER_CONNECT_RETRY=1, MASTER_HOST='127.0.0.1', MASTER_PORT=MASTER_MYPORT;
connection slave;
-START SLAVE UNTIL MASTER_LOG_FILE='master-bin.000001', MASTER_LOG_POS=master_pos_drop_t1
+START SLAVE UNTIL MASTER_LOG_FILE='master-bin.000001', MASTER_LOG_POS=master_pos_drop_t1;
include/wait_for_slave_sql_to_stop.inc
SELECT * FROM t1;
n
@@ -23,7 +22,7 @@ n
3
4
include/check_slave_param.inc [Exec_Master_Log_Pos]
-START SLAVE UNTIL MASTER_LOG_FILE='master-no-such-bin.000001', MASTER_LOG_POS=MASTER_LOG_POS;
+START SLAVE UNTIL MASTER_LOG_FILE='master-no-such-bin.000001', MASTER_LOG_POS=291;
include/wait_for_slave_sql_to_stop.inc
SELECT * FROM t1;
n
@@ -32,19 +31,12 @@ n
3
4
include/check_slave_param.inc [Exec_Master_Log_Pos]
-START SLAVE UNTIL RELAY_LOG_FILE='slave-relay-bin.000002', RELAY_LOG_POS=relay_pos_insert1_t2
-include/wait_for_slave_sql_to_stop.inc
-SELECT * FROM t2;
-n
-1
-2
-include/check_slave_param.inc [Exec_Master_Log_Pos]
START SLAVE;
include/wait_for_slave_to_start.inc
connection master;
connection slave;
include/stop_slave.inc
-START SLAVE SQL_THREAD UNTIL MASTER_LOG_FILE='master-bin.000001', MASTER_LOG_POS=master_pos_create_t2
+START SLAVE SQL_THREAD UNTIL MASTER_LOG_FILE='master-bin.000001', MASTER_LOG_POS=master_pos_create_t2;
include/wait_for_slave_param.inc [Until_Log_Pos]
include/wait_for_slave_sql_to_stop.inc
include/check_slave_param.inc [Exec_Master_Log_Pos]
@@ -58,10 +50,25 @@ START SLAVE UNTIL RELAY_LOG_FILE='slave-relay-bin.000009';
ERROR HY000: Incorrect parameter or combination of parameters for START SLAVE UNTIL
START SLAVE UNTIL RELAY_LOG_FILE='slave-relay-bin.000002', MASTER_LOG_POS=MASTER_LOG_POS;
ERROR HY000: Incorrect parameter or combination of parameters for START SLAVE UNTIL
-START SLAVE;
START SLAVE UNTIL MASTER_LOG_FILE='master-bin.000001', MASTER_LOG_POS=MASTER_LOG_POS;
-Warnings:
-Note 1254 Slave is already running
include/stop_slave.inc
RESET SLAVE;
+include/start_slave.inc
+include/rpl_reset.inc
+connection master;
+CREATE TABLE t1 (a INT);
+INSERT INTO t1 VALUES (1);
+connection slave;
+include/stop_slave_sql.inc
+connection master;
+INSERT INTO t1 VALUES (2);
+INSERT INTO t1 VALUES (3);
+include/sync_slave_io_with_master.inc
+start slave until relay_log_file='slave-relay-bin.000002', relay_log_pos=relay_log_pos;
+include/wait_for_slave_sql_to_stop.inc
+include/assert.inc [table t1 should have two rows.]
+include/start_slave.inc
+connection master;
+DROP TABLE t1;
+connection slave;
include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/r/rpl_row_view01.result b/mysql-test/suite/engines/funcs/r/rpl_row_view01.result
index a4b8d0a05e3..5c9944e75e0 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_row_view01.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_row_view01.result
@@ -1,9 +1,6 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
+include/master-slave.inc
+[connection master]
+connection master;
create database if not exists mysqltest1;
DROP VIEW IF EXISTS mysqltest1.v1;
DROP VIEW IF EXISTS mysqltest1.v2;
@@ -35,6 +32,7 @@ a c c2
1 Thank GOD
2 it is
3 Friday TGIF
+connection slave;
SELECT * FROM mysqltest1.v2;
qty price value
3 50 150
@@ -45,6 +43,7 @@ a c c2
1 Thank GOD
2 it is
3 Friday TGIF
+connection master;
INSERT INTO mysqltest1.t5 SELECT * FROM mysqltest1.v2;
INSERT INTO mysqltest1.t3 SELECT * FROM mysqltest1.v1;
SELECT * FROM mysqltest1.t5 ORDER BY qty;
@@ -57,6 +56,7 @@ a c c2
1 Thank GOD
2 it is
3 Friday TGIF
+connection slave;
SELECT * FROM mysqltest1.t5 ORDER BY qty;
qty price total
3 50 150
@@ -67,6 +67,7 @@ a c c2
1 Thank GOD
2 it is
3 Friday TGIF
+connection master;
INSERT INTO mysqltest1.v4 VALUES (4,'TEST');
SELECT * FROM mysqltest1.t1 ORDER BY a;
a c
@@ -79,6 +80,7 @@ a c
2 it
3 Friday
4 TEST
+connection slave;
SELECT * FROM mysqltest1.t1 ORDER BY a;
a c
1 Thank
@@ -90,6 +92,7 @@ a c
2 it
3 Friday
4 TEST
+connection master;
DROP VIEW IF EXISTS mysqltest1.v1;
DROP VIEW IF EXISTS mysqltest1.v2;
DROP VIEW IF EXISTS mysqltest1.v3;
@@ -99,3 +102,5 @@ DROP TABLE IF EXISTS mysqltest1.t1;
DROP TABLE IF EXISTS mysqltest1.t2;
DROP TABLE IF EXISTS mysqltest1.t4;
DROP DATABASE mysqltest1;
+connection slave;
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/r/rpl_server_id1.result b/mysql-test/suite/engines/funcs/r/rpl_server_id1.result
index 700bc270f07..001d1151843 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_server_id1.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_server_id1.result
@@ -1,19 +1,21 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
+include/master-slave.inc
+[connection master]
+connection slave;
create table t1 (n int);
reset master;
-stop slave;
+include/stop_slave.inc
change master to master_port=SLAVE_PORT;
-show slave status;
-Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master Master_SSL_Verify_Server_Cert Last_IO_Errno Last_IO_Error Last_SQL_Errno Last_SQL_Error Replicate_Ignore_Server_Ids Master_Server_Id
- 127.0.0.1 root SLAVE_PORT 1 4 slave-relay-bin.000001 4 No No # # 0 0 0 107 None 0 No NULL No 0 0 1
+Slave_IO_Running = 'No'
+Slave_SQL_Running = 'No'
+Last_SQL_Errno = '0'
+Last_SQL_Error = ''
+Exec_Master_Log_Pos = '0'
start slave;
insert into t1 values (1);
-show status like "slave_running";
-Variable_name Value
-Slave_running OFF
+include/wait_for_slave_param.inc [Last_IO_Errno]
+Last_IO_Errno = '1593'
+Last_IO_Error = 'Fatal error: The slave I/O thread stops because master and slave have equal MySQL server ids; these ids must be different for replication to work (or the --replicate-same-server-id option must be used on slave but this does not always make sense; please check the manual before using it).'
+include/stop_slave.inc
+reset slave;
+reset master;
drop table t1;
diff --git a/mysql-test/suite/engines/funcs/r/rpl_server_id2.result b/mysql-test/suite/engines/funcs/r/rpl_server_id2.result
index f8d24f70776..74145645920 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_server_id2.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_server_id2.result
@@ -1,21 +1,38 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
+include/master-slave.inc
+[connection master]
+connection slave;
create table t1 (n int);
reset master;
stop slave;
+include/wait_for_slave_to_stop.inc
change master to master_port=SLAVE_PORT;
-show slave status;
-Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master Master_SSL_Verify_Server_Cert Last_IO_Errno Last_IO_Error Last_SQL_Errno Last_SQL_Error Replicate_Ignore_Server_Ids Master_Server_Id
- 127.0.0.1 root SLAVE_PORT 1 4 slave-relay-bin.000001 4 No No # 0 0 0 107 None 0 No NULL No 0 0 1
start slave;
+include/wait_for_slave_to_start.inc
insert into t1 values (1);
select * from t1;
n
1
1
stop slave;
+include/wait_for_slave_to_stop.inc
+drop table t1;
+connection master;
+reset master;
+create table t1(n int);
+create table t2(n int);
+connection slave;
+change master to master_port=MASTER_PORT;
+start slave until master_log_file='master-bin.000001', master_log_pos=UNTIL_POS;
+include/wait_for_slave_io_to_start.inc
+include/wait_for_slave_sql_to_stop.inc
+*** checking until position execution: must be only t1 in the list ***
+show tables;
+Tables_in_test
+t1
+connection slave;
+start slave sql_thread;
+connection master;
drop table t1;
+drop table t2;
+connection slave;
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/r/rpl_session_var.result b/mysql-test/suite/engines/funcs/r/rpl_session_var.result
index b5b4b815ade..67863583f8d 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_session_var.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_session_var.result
@@ -1,12 +1,8 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
+include/master-slave.inc
+[connection master]
drop table if exists t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'test.t1'
create table t1(a varchar(100),b int);
set @@session.sql_mode=pipes_as_concat;
insert into t1 values('My'||'SQL', 1);
@@ -16,10 +12,12 @@ select * from t1 where b<3 order by a;
a b
1 2
MySQL 1
+connection slave;
select * from t1 where b<3 order by a;
a b
1 2
MySQL 1
+connection master;
set @@session.sql_mode=ignore_space;
insert into t1 values(password ('MySQL'), 3);
set @@session.sql_mode=ansi_quotes;
@@ -37,7 +35,24 @@ insert into t2 select 2,a from t1 where a is null;
select * from t2 order by b;
b a
1 1
+connection slave;
select * from t2 order by b;
b a
1 1
+connection master;
drop table t1,t2;
+connection slave;
+connection master;
+CREATE TABLE t1 (
+`id` int(11) NOT NULL auto_increment,
+`data` varchar(100),
+PRIMARY KEY (`id`)
+) ENGINE=MyISAM;
+INSERT INTO t1(data) VALUES(SESSION_USER());
+connection slave;
+SELECT length(data) < 100 FROM t1;
+length(data) < 100
+1
+connection master;
+drop table t1;
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/r/rpl_sf.result b/mysql-test/suite/engines/funcs/r/rpl_sf.result
index 9cb9c9e8354..e692e31f908 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_sf.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_sf.result
@@ -1,9 +1,5 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
+include/master-slave.inc
+[connection master]
set global log_bin_trust_function_creators=0;
set binlog_format=STATEMENT;
create function fn16456()
@@ -27,3 +23,4 @@ set binlog_format=STATEMENT;
select fn16456();
ERROR HY000: This function has none of DETERMINISTIC, NO SQL, or READS SQL DATA in its declaration and binary logging is enabled (you *might* want to use the less safe log_bin_trust_function_creators variable)
drop function fn16456;
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/r/rpl_skip_error.result b/mysql-test/suite/engines/funcs/r/rpl_skip_error.result
index 248ce5b52c3..98c165e7db0 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_skip_error.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_skip_error.result
@@ -1,16 +1,18 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
+include/master-slave.inc
+[connection master]
create table t1 (n int not null primary key);
+connection slave;
insert into t1 values (1);
+connection master;
insert into t1 values (1);
insert into t1 values (2),(3);
+connection slave;
select * from t1 ORDER BY n;
n
1
2
3
+connection master;
drop table t1;
+connection slave;
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/r/rpl_slave_status.result b/mysql-test/suite/engines/funcs/r/rpl_slave_status.result
index 0e438d294fd..1c81cec2577 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_slave_status.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_slave_status.result
@@ -1,63 +1,48 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
-grant replication slave on *.* to rpl@127.0.0.1 identified by 'rpl';
-stop slave;
-change master to master_user='rpl',master_password='rpl';
-start slave;
-drop table if exists t1;
-create table t1 (n int);
-insert into t1 values (1);
-select * from t1;
+include/master-slave.inc
+[connection master]
+==== Create new replication user ====
+connection master;
+GRANT REPLICATION SLAVE ON *.* TO rpl@127.0.0.1 IDENTIFIED BY 'rpl';
+connection slave;
+include/stop_slave.inc
+set @save_relay_log_purge=@@global.relay_log_purge;
+set @@global.relay_log_purge=0;
+CHANGE MASTER TO master_user='rpl', master_password='rpl';
+select @@global.relay_log_purge;
+@@global.relay_log_purge
+0
+set @@global.relay_log_purge=1;
+CHANGE MASTER TO master_user='rpl', master_password='rpl';
+select @@global.relay_log_purge;
+@@global.relay_log_purge
+1
+set @@global.relay_log_purge=@save_relay_log_purge;
+CHANGE MASTER TO master_user='rpl', master_password='rpl';
+include/start_slave.inc
+==== Do replication as new user ====
+connection master;
+CREATE TABLE t1 (n INT);
+INSERT INTO t1 VALUES (1);
+connection slave;
+SELECT * FROM t1;
n
1
-drop user rpl@127.0.0.1;
-flush privileges;
-stop slave;
-start slave;
-show slave status;
-Slave_IO_State #
-Master_Host 127.0.0.1
-Master_User rpl
-Master_Port MASTER_MYPORT
-Connect_Retry 1
-Master_Log_File master-bin.000001
-Read_Master_Log_Pos #
-Relay_Log_File #
-Relay_Log_Pos #
-Relay_Master_Log_File master-bin.000001
-Slave_IO_Running Connecting
-Slave_SQL_Running Yes
-Replicate_Do_DB
-Replicate_Ignore_DB
-Replicate_Do_Table
-Replicate_Ignore_Table
-Replicate_Wild_Do_Table
-Replicate_Wild_Ignore_Table
-Last_Errno 0
-Last_Error
-Skip_Counter 0
-Exec_Master_Log_Pos #
-Relay_Log_Space #
-Until_Condition None
-Until_Log_File
-Until_Log_Pos 0
-Master_SSL_Allowed No
-Master_SSL_CA_File
-Master_SSL_CA_Path
-Master_SSL_Cert
-Master_SSL_Cipher
-Master_SSL_Key
-Seconds_Behind_Master NULL
-Master_SSL_Verify_Server_Cert No
-Last_IO_Errno #
-Last_IO_Error #
-Last_SQL_Errno 0
-Last_SQL_Error
-Replicate_Ignore_Server_Ids
-Master_Server_Id 1
-drop table t1;
-drop table t1;
+==== Delete new replication user ====
+connection master;
+DROP USER rpl@127.0.0.1;
+FLUSH PRIVILEGES;
+connection slave;
+==== Restart slave without privileges =====
+include/stop_slave.inc
+START SLAVE;
+include/wait_for_slave_sql_to_start.inc
+include/wait_for_slave_io_to_stop.inc
+==== Verify that Slave IO thread stopped with error ====
+include/wait_for_slave_io_error.inc [errno=1045]
+==== Cleanup (Note that slave IO thread is not running) ====
+include/stop_slave_sql.inc
+CHANGE MASTER TO MASTER_USER = 'root', MASTER_PASSWORD = '';
+include/rpl_reset.inc
+connection master;
+DROP TABLE t1;
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/r/rpl_sp,myisam,mix.rdiff b/mysql-test/suite/engines/funcs/r/rpl_sp,myisam,mix.rdiff
new file mode 100644
index 00000000000..da41283e42f
--- /dev/null
+++ b/mysql-test/suite/engines/funcs/r/rpl_sp,myisam,mix.rdiff
@@ -0,0 +1,479 @@
+--- /home/alice/git/10.3/mysql-test/suite/engines/funcs/r/rpl_sp,myisam,mix.result~ 2021-03-19 17:27:12.935559866 +0100
++++ /home/alice/git/10.3/mysql-test/suite/engines/funcs/r/rpl_sp,myisam,mix.reject 2021-03-19 17:27:14.071534938 +0100
+@@ -126,12 +126,15 @@
+ show warnings;
+ Level Code Message
+ Error 1062 Duplicate entry '20' for key 'a'
++Warning 1196 Some non-transactional changed tables couldn't be rolled back
+ Note 4094 At line 4 in mysqltest1.foo4
+ select * from t2;
+ a
++20
+ connection slave;
+ select * from t2;
+ a
++20
+ select * from mysql.proc where name="foo4" and db='mysqltest1';
+ db name type specific_name language sql_data_access is_deterministic security_type param_list returns body definer created modified sql_mode comment character_set_client collation_connection db_collation body_utf8 aggregate
+ mysqltest1 foo4 PROCEDURE foo4 SQL CONTAINS_SQL YES DEFINER begin
+@@ -171,20 +174,16 @@
+ insert into t2 values(fn1(21));
+ select * from t1;
+ a
+-15
+ 20
+ 21
+-5
+ select * from t2;
+ a
+ 23
+ connection slave;
+ select * from t1;
+ a
+-15
+ 20
+ 21
+-5
+ select * from t2;
+ a
+ 23
+@@ -292,13 +291,18 @@
+ Warnings:
+ Error 1062 Duplicate entry '100' for key 'a'
+ Note 4094 At line 3 in mysqltest1.fn1
++Warning 1196 Some non-transactional changed tables couldn't be rolled back
+ select fn1(20);
+ ERROR 23000: Duplicate entry '20' for key 'a'
+ select * from t2;
+ a
++20
++100
+ connection slave;
+ select * from t2;
+ a
++20
++100
+ connection con1;
+ create trigger trg before insert on t1 for each row set new.a= 10;
+ ERROR 42000: TRIGGER command denied to user 'zedjzlcsjhd'@'localhost' for table 't1'
+@@ -472,13 +476,13 @@
+ end
+ master-bin.000001 # Gtid # # BEGIN GTID #-#-#
+ master-bin.000001 # Query # # use `mysqltest1`; insert into t1 values ( NAME_CONST('b',8))
+-master-bin.000001 # Xid # # COMMIT /* XID */
++master-bin.000001 # Query # # COMMIT
+ master-bin.000001 # Gtid # # BEGIN GTID #-#-#
+ master-bin.000001 # Query # # use `mysqltest1`; insert into t1 values (unix_timestamp())
+-master-bin.000001 # Xid # # COMMIT /* XID */
++master-bin.000001 # Query # # COMMIT
+ master-bin.000001 # Gtid # # BEGIN GTID #-#-#
+ master-bin.000001 # Query # # use `mysqltest1`; delete from t1
+-master-bin.000001 # Xid # # COMMIT /* XID */
++master-bin.000001 # Query # # COMMIT
+ master-bin.000001 # Gtid # # GTID #-#-#
+ master-bin.000001 # Query # # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` PROCEDURE `foo2`()
+ select * from mysqltest1.t1
+@@ -509,24 +513,24 @@
+ end
+ master-bin.000001 # Gtid # # BEGIN GTID #-#-#
+ master-bin.000001 # Query # # use `mysqltest1`; insert into t2 values(3)
+-master-bin.000001 # Xid # # COMMIT /* XID */
++master-bin.000001 # Query # # COMMIT
+ master-bin.000001 # Gtid # # BEGIN GTID #-#-#
+ master-bin.000001 # Query # # use `mysqltest1`; insert into t1 values (15)
+-master-bin.000001 # Xid # # COMMIT /* XID */
++master-bin.000001 # Query # # COMMIT
+ master-bin.000001 # Gtid # # BEGIN GTID #-#-#
+ master-bin.000001 # Query # # use `mysqltest1`; insert into t2 values(3)
+-master-bin.000001 # Xid # # COMMIT /* XID */
++master-bin.000001 # Query # # COMMIT
+ master-bin.000001 # Gtid # # GTID #-#-#
+ master-bin.000001 # Query # # use `mysqltest1`; alter procedure foo4 sql security invoker
+ master-bin.000001 # Gtid # # BEGIN GTID #-#-#
+ master-bin.000001 # Query # # use `mysqltest1`; insert into t2 values(3)
+-master-bin.000001 # Xid # # COMMIT /* XID */
++master-bin.000001 # Query # # COMMIT
+ master-bin.000001 # Gtid # # BEGIN GTID #-#-#
+ master-bin.000001 # Query # # use `mysqltest1`; insert into t1 values (5)
+-master-bin.000001 # Xid # # COMMIT /* XID */
++master-bin.000001 # Query # # COMMIT
+ master-bin.000001 # Gtid # # BEGIN GTID #-#-#
+ master-bin.000001 # Query # # use `mysqltest1`; delete from t2
+-master-bin.000001 # Xid # # COMMIT /* XID */
++master-bin.000001 # Query # # COMMIT
+ master-bin.000001 # Gtid # # GTID #-#-#
+ master-bin.000001 # Query # # use `mysqltest1`; alter table t2 add unique (a)
+ master-bin.000001 # Gtid # # GTID #-#-#
+@@ -537,6 +541,9 @@
+ begin
+ insert into t2 values(20),(20);
+ end
++master-bin.000001 # Gtid # # BEGIN GTID #-#-#
++master-bin.000001 # Query # # use `mysqltest1`; insert into t2 values(20),(20)
++master-bin.000001 # Query # # COMMIT
+ master-bin.000001 # Gtid # # GTID #-#-#
+ master-bin.000001 # Query # # use `mysqltest1`; drop procedure foo4
+ master-bin.000001 # Gtid # # GTID #-#-#
+@@ -557,10 +564,10 @@
+ master-bin.000001 # Query # # COMMIT
+ master-bin.000001 # Gtid # # BEGIN GTID #-#-#
+ master-bin.000001 # Query # # use `mysqltest1`; SELECT `mysqltest1`.`fn1`(20)
+-master-bin.000001 # Xid # # COMMIT /* XID */
++master-bin.000001 # Query # # COMMIT
+ master-bin.000001 # Gtid # # BEGIN GTID #-#-#
+ master-bin.000001 # Query # # use `mysqltest1`; insert into t2 values(fn1(21))
+-master-bin.000001 # Xid # # COMMIT /* XID */
++master-bin.000001 # Query # # COMMIT
+ master-bin.000001 # Gtid # # GTID #-#-#
+ master-bin.000001 # Query # # use `mysqltest1`; drop function fn1
+ master-bin.000001 # Gtid # # GTID #-#-#
+@@ -571,10 +578,10 @@
+ end
+ master-bin.000001 # Gtid # # BEGIN GTID #-#-#
+ master-bin.000001 # Query # # use `mysqltest1`; delete from t1
+-master-bin.000001 # Xid # # COMMIT /* XID */
++master-bin.000001 # Query # # COMMIT
+ master-bin.000001 # Gtid # # BEGIN GTID #-#-#
+ master-bin.000001 # Query # # use `mysqltest1`; insert into t1 values(fn1())
+-master-bin.000001 # Xid # # COMMIT /* XID */
++master-bin.000001 # Query # # COMMIT
+ master-bin.000001 # Gtid # # GTID #-#-#
+ master-bin.000001 # Query # # use `mysqltest1`; CREATE DEFINER=`zedjzlcsjhd`@`127.0.0.1` FUNCTION `fn2`() RETURNS int(11)
+ NO SQL
+@@ -589,7 +596,7 @@
+ end
+ master-bin.000001 # Gtid # # BEGIN GTID #-#-#
+ master-bin.000001 # Query # # use `mysqltest1`; delete from t2
+-master-bin.000001 # Xid # # COMMIT /* XID */
++master-bin.000001 # Query # # COMMIT
+ master-bin.000001 # Gtid # # GTID #-#-#
+ master-bin.000001 # Query # # use `mysqltest1`; alter table t2 add unique (a)
+ master-bin.000001 # Gtid # # GTID #-#-#
+@@ -601,21 +608,27 @@
+ return 10;
+ end
+ master-bin.000001 # Gtid # # BEGIN GTID #-#-#
++master-bin.000001 # Query # # use `mysqltest1`; SELECT `mysqltest1`.`fn1`(100)
++master-bin.000001 # Query # # COMMIT
++master-bin.000001 # Gtid # # BEGIN GTID #-#-#
++master-bin.000001 # Query # # use `mysqltest1`; SELECT `mysqltest1`.`fn1`(20)
++master-bin.000001 # Query # # COMMIT
++master-bin.000001 # Gtid # # BEGIN GTID #-#-#
+ master-bin.000001 # Query # # use `mysqltest1`; delete from t1
+-master-bin.000001 # Xid # # COMMIT /* XID */
++master-bin.000001 # Query # # COMMIT
+ master-bin.000001 # Gtid # # GTID #-#-#
+ master-bin.000001 # Query # # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` trigger trg before insert on t1 for each row set new.a= 10
+ master-bin.000001 # Gtid # # BEGIN GTID #-#-#
+ master-bin.000001 # Query # # use `mysqltest1`; insert into t1 values (1)
+-master-bin.000001 # Xid # # COMMIT /* XID */
++master-bin.000001 # Query # # COMMIT
+ master-bin.000001 # Gtid # # BEGIN GTID #-#-#
+ master-bin.000001 # Query # # use `mysqltest1`; delete from t1
+-master-bin.000001 # Xid # # COMMIT /* XID */
++master-bin.000001 # Query # # COMMIT
+ master-bin.000001 # Gtid # # GTID #-#-#
+ master-bin.000001 # Query # # use `mysqltest1`; drop trigger trg
+ master-bin.000001 # Gtid # # BEGIN GTID #-#-#
+ master-bin.000001 # Query # # use `mysqltest1`; insert into t1 values (1)
+-master-bin.000001 # Xid # # COMMIT /* XID */
++master-bin.000001 # Query # # COMMIT
+ master-bin.000001 # Gtid # # GTID #-#-#
+ master-bin.000001 # Query # # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` PROCEDURE `foo`()
+ READS SQL DATA
+@@ -647,7 +660,7 @@
+ master-bin.000001 # Query # # use `test`; create table t1 (a int)
+ master-bin.000001 # Gtid # # BEGIN GTID #-#-#
+ master-bin.000001 # Query # # use `test`; insert into t1 (a) values (f1())
+-master-bin.000001 # Xid # # COMMIT /* XID */
++master-bin.000001 # Query # # COMMIT
+ master-bin.000001 # Gtid # # GTID #-#-#
+ master-bin.000001 # Query # # use `test`; drop view v1
+ master-bin.000001 # Gtid # # GTID #-#-#
+@@ -663,7 +676,7 @@
+ INSERT INTO t1 VALUES(arg)
+ master-bin.000001 # Gtid # # BEGIN GTID #-#-#
+ master-bin.000001 # Query # # use `test`; INSERT INTO t1 VALUES( NAME_CONST('arg',_latin1'test' COLLATE 'latin1_swedish_ci'))
+-master-bin.000001 # Xid # # COMMIT /* XID */
++master-bin.000001 # Query # # COMMIT
+ master-bin.000001 # Gtid # # GTID #-#-#
+ master-bin.000001 # Query # # use `test`; DROP PROCEDURE p1
+ master-bin.000001 # Gtid # # GTID #-#-#
+@@ -697,7 +710,7 @@
+ begin end
+ master-bin.000001 # Gtid # # BEGIN GTID #-#-#
+ master-bin.000001 # Query # # use `mysqltest2`; insert into t values ( 1 )
+-master-bin.000001 # Xid # # COMMIT /* XID */
++master-bin.000001 # Query # # COMMIT
+ master-bin.000001 # Gtid # # GTID #-#-#
+ master-bin.000001 # Query # # use `mysqltest2`; CREATE DEFINER=`root`@`localhost` FUNCTION `f1`() RETURNS int(11)
+ begin
+@@ -706,7 +719,7 @@
+ end
+ master-bin.000001 # Gtid # # BEGIN GTID #-#-#
+ master-bin.000001 # Query # # use `mysqltest`; SELECT `mysqltest2`.`f1`()
+-master-bin.000001 # Xid # # COMMIT /* XID */
++master-bin.000001 # Query # # COMMIT
+ connection slave;
+ set @@global.log_bin_trust_function_creators= @old_log_bin_trust_function_creators;
+ connection master;
+@@ -801,19 +814,25 @@
+ SET TIMESTAMP=t/*!*/;
+ insert into t1 values ( NAME_CONST('b',8))
+ /*!*/;
+-COMMIT/*!*/;
++SET TIMESTAMP=t/*!*/;
++COMMIT
++/*!*/;
+ START TRANSACTION
+ /*!*/;
+ SET TIMESTAMP=t/*!*/;
+ insert into t1 values (unix_timestamp())
+ /*!*/;
+-COMMIT/*!*/;
++SET TIMESTAMP=t/*!*/;
++COMMIT
++/*!*/;
+ START TRANSACTION
+ /*!*/;
+ SET TIMESTAMP=t/*!*/;
+ delete from t1
+ /*!*/;
+-COMMIT/*!*/;
++SET TIMESTAMP=t/*!*/;
++COMMIT
++/*!*/;
+ SET TIMESTAMP=t/*!*/;
+ CREATE DEFINER=`root`@`localhost` PROCEDURE `foo2`()
+ select * from mysqltest1.t1
+@@ -858,21 +877,27 @@
+ SET TIMESTAMP=t/*!*/;
+ insert into t2 values(3)
+ /*!*/;
+-COMMIT/*!*/;
++SET TIMESTAMP=t/*!*/;
++COMMIT
++/*!*/;
+ START TRANSACTION
+ /*!*/;
+ SET TIMESTAMP=t/*!*/;
+ SET @@session.sql_mode=0/*!*/;
+ insert into t1 values (15)
+ /*!*/;
+-COMMIT/*!*/;
++SET TIMESTAMP=t/*!*/;
++COMMIT
++/*!*/;
+ START TRANSACTION
+ /*!*/;
+ SET TIMESTAMP=t/*!*/;
+ SET @@session.sql_mode=1411383296/*!*/;
+ insert into t2 values(3)
+ /*!*/;
+-COMMIT/*!*/;
++SET TIMESTAMP=t/*!*/;
++COMMIT
++/*!*/;
+ SET TIMESTAMP=t/*!*/;
+ SET @@session.sql_mode=0/*!*/;
+ alter procedure foo4 sql security invoker
+@@ -883,20 +908,26 @@
+ SET @@session.sql_mode=1411383296/*!*/;
+ insert into t2 values(3)
+ /*!*/;
+-COMMIT/*!*/;
++SET TIMESTAMP=t/*!*/;
++COMMIT
++/*!*/;
+ START TRANSACTION
+ /*!*/;
+ SET TIMESTAMP=t/*!*/;
+ insert into t1 values (5)
+ /*!*/;
+-COMMIT/*!*/;
++SET TIMESTAMP=t/*!*/;
++COMMIT
++/*!*/;
+ START TRANSACTION
+ /*!*/;
+ SET TIMESTAMP=t/*!*/;
+ SET @@session.sql_mode=0/*!*/;
+ delete from t2
+ /*!*/;
+-COMMIT/*!*/;
++SET TIMESTAMP=t/*!*/;
++COMMIT
++/*!*/;
+ SET TIMESTAMP=t/*!*/;
+ alter table t2 add unique (a)
+ /*!*/;
+@@ -910,6 +941,14 @@
+ insert into t2 values(20),(20);
+ end
+ /*!*/;
++START TRANSACTION
++/*!*/;
++SET TIMESTAMP=t/*!*/;
++insert into t2 values(20),(20)
++/*!*/;
++SET TIMESTAMP=t/*!*/;
++COMMIT
++/*!*/;
+ SET TIMESTAMP=t/*!*/;
+ drop procedure foo4
+ /*!*/;
+@@ -943,13 +982,17 @@
+ SET TIMESTAMP=t/*!*/;
+ SELECT `mysqltest1`.`fn1`(20)
+ /*!*/;
+-COMMIT/*!*/;
++SET TIMESTAMP=t/*!*/;
++COMMIT
++/*!*/;
+ START TRANSACTION
+ /*!*/;
+ SET TIMESTAMP=t/*!*/;
+ insert into t2 values(fn1(21))
+ /*!*/;
+-COMMIT/*!*/;
++SET TIMESTAMP=t/*!*/;
++COMMIT
++/*!*/;
+ SET TIMESTAMP=t/*!*/;
+ drop function fn1
+ /*!*/;
+@@ -965,13 +1008,17 @@
+ SET TIMESTAMP=t/*!*/;
+ delete from t1
+ /*!*/;
+-COMMIT/*!*/;
++SET TIMESTAMP=t/*!*/;
++COMMIT
++/*!*/;
+ START TRANSACTION
+ /*!*/;
+ SET TIMESTAMP=t/*!*/;
+ insert into t1 values(fn1())
+ /*!*/;
+-COMMIT/*!*/;
++SET TIMESTAMP=t/*!*/;
++COMMIT
++/*!*/;
+ SET TIMESTAMP=t/*!*/;
+ SET @@session.sql_mode=1411383296/*!*/;
+ CREATE DEFINER=`zedjzlcsjhd`@`127.0.0.1` FUNCTION `fn2`() RETURNS int(11)
+@@ -993,7 +1040,9 @@
+ SET TIMESTAMP=t/*!*/;
+ delete from t2
+ /*!*/;
+-COMMIT/*!*/;
++SET TIMESTAMP=t/*!*/;
++COMMIT
++/*!*/;
+ SET TIMESTAMP=t/*!*/;
+ alter table t2 add unique (a)
+ /*!*/;
+@@ -1010,9 +1059,27 @@
+ START TRANSACTION
+ /*!*/;
+ SET TIMESTAMP=t/*!*/;
++SELECT `mysqltest1`.`fn1`(100)
++/*!*/;
++SET TIMESTAMP=t/*!*/;
++COMMIT
++/*!*/;
++START TRANSACTION
++/*!*/;
++SET TIMESTAMP=t/*!*/;
++SELECT `mysqltest1`.`fn1`(20)
++/*!*/;
++SET TIMESTAMP=t/*!*/;
++COMMIT
++/*!*/;
++START TRANSACTION
++/*!*/;
++SET TIMESTAMP=t/*!*/;
+ delete from t1
+ /*!*/;
+-COMMIT/*!*/;
++SET TIMESTAMP=t/*!*/;
++COMMIT
++/*!*/;
+ SET TIMESTAMP=t/*!*/;
+ CREATE DEFINER=`root`@`localhost` trigger trg before insert on t1 for each row set new.a= 10
+ /*!*/;
+@@ -1021,13 +1088,17 @@
+ SET TIMESTAMP=t/*!*/;
+ insert into t1 values (1)
+ /*!*/;
+-COMMIT/*!*/;
++SET TIMESTAMP=t/*!*/;
++COMMIT
++/*!*/;
+ START TRANSACTION
+ /*!*/;
+ SET TIMESTAMP=t/*!*/;
+ delete from t1
+ /*!*/;
+-COMMIT/*!*/;
++SET TIMESTAMP=t/*!*/;
++COMMIT
++/*!*/;
+ SET TIMESTAMP=t/*!*/;
+ drop trigger trg
+ /*!*/;
+@@ -1036,7 +1107,9 @@
+ SET TIMESTAMP=t/*!*/;
+ insert into t1 values (1)
+ /*!*/;
+-COMMIT/*!*/;
++SET TIMESTAMP=t/*!*/;
++COMMIT
++/*!*/;
+ SET TIMESTAMP=t/*!*/;
+ CREATE DEFINER=`root`@`localhost` PROCEDURE `foo`()
+ READS SQL DATA
+@@ -1081,7 +1154,9 @@
+ SET TIMESTAMP=t/*!*/;
+ insert into t1 (a) values (f1())
+ /*!*/;
+-COMMIT/*!*/;
++SET TIMESTAMP=t/*!*/;
++COMMIT
++/*!*/;
+ SET TIMESTAMP=t/*!*/;
+ drop view v1
+ /*!*/;
+@@ -1106,7 +1181,9 @@
+ SET TIMESTAMP=t/*!*/;
+ INSERT INTO t1 VALUES( NAME_CONST('arg',_latin1'test' COLLATE 'latin1_swedish_ci'))
+ /*!*/;
+-COMMIT/*!*/;
++SET TIMESTAMP=t/*!*/;
++COMMIT
++/*!*/;
+ SET TIMESTAMP=t/*!*/;
+ DROP PROCEDURE p1
+ /*!*/;
+@@ -1158,7 +1235,9 @@
+ SET TIMESTAMP=t/*!*/;
+ insert into t values ( 1 )
+ /*!*/;
+-COMMIT/*!*/;
++SET TIMESTAMP=t/*!*/;
++COMMIT
++/*!*/;
+ SET TIMESTAMP=t/*!*/;
+ CREATE DEFINER=`root`@`localhost` FUNCTION `f1`() RETURNS int(11)
+ begin
+@@ -1172,7 +1251,9 @@
+ SET TIMESTAMP=t/*!*/;
+ SELECT `mysqltest2`.`f1`()
+ /*!*/;
+-COMMIT/*!*/;
++SET TIMESTAMP=t/*!*/;
++COMMIT
++/*!*/;
+ SET TIMESTAMP=t/*!*/;
+ drop database mysqltest
+ /*!*/;
diff --git a/mysql-test/suite/engines/funcs/r/rpl_sp.result b/mysql-test/suite/engines/funcs/r/rpl_sp.result
index 873ee6b03c5..0e5929bcbd7 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_sp.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_sp.result
@@ -1,14 +1,13 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
+include/master-slave.inc
+[connection master]
+set local sql_mode='';
drop database if exists mysqltest1;
create database mysqltest1;
use mysqltest1;
create table t1 (a varchar(100));
+connection slave;
use mysqltest1;
+connection master;
create procedure foo()
begin
declare b int;
@@ -17,7 +16,7 @@ insert into t1 values (b);
insert into t1 values (unix_timestamp());
end|
select * from mysql.proc where name='foo' and db='mysqltest1';
-db name type specific_name language sql_data_access is_deterministic security_type param_list returns body definer created modified sql_mode comment character_set_client collation_connection db_collation body_utf8
+db name type specific_name language sql_data_access is_deterministic security_type param_list returns body definer created modified sql_mode comment character_set_client collation_connection db_collation body_utf8 aggregate
mysqltest1 foo PROCEDURE foo SQL CONTAINS_SQL NO DEFINER begin
declare b int;
set b = 8;
@@ -28,9 +27,10 @@ declare b int;
set b = 8;
insert into t1 values (b);
insert into t1 values (unix_timestamp());
-end
+end NONE
+connection slave;
select * from mysql.proc where name='foo' and db='mysqltest1';
-db name type specific_name language sql_data_access is_deterministic security_type param_list returns body definer created modified sql_mode comment character_set_client collation_connection db_collation body_utf8
+db name type specific_name language sql_data_access is_deterministic security_type param_list returns body definer created modified sql_mode comment character_set_client collation_connection db_collation body_utf8 aggregate
mysqltest1 foo PROCEDURE foo SQL CONTAINS_SQL NO DEFINER begin
declare b int;
set b = 8;
@@ -41,17 +41,20 @@ declare b int;
set b = 8;
insert into t1 values (b);
insert into t1 values (unix_timestamp());
-end
+end NONE
+connection master;
set timestamp=1000000000;
call foo();
select * from t1;
a
8
1000000000
+connection slave;
select * from t1;
a
8
1000000000
+connection master;
delete from t1;
create procedure foo2()
select * from mysqltest1.t1;
@@ -70,6 +73,8 @@ grant SELECT, INSERT on mysqltest1.t2 to "zedjzlcsjhd"@127.0.0.1;
SELECT 1;
1
1
+connect con1,127.0.0.1,zedjzlcsjhd,,mysqltest1,$MASTER_MYPORT,;
+connection con1;
create procedure foo4()
deterministic
begin
@@ -78,6 +83,7 @@ insert into t1 values (5);
end|
call foo4();
Got one of the listed errors
+connection master;
call foo3();
show warnings;
Level Code Message
@@ -96,6 +102,7 @@ a
3
3
3
+connection slave;
select * from t1;
a
15
@@ -105,20 +112,41 @@ a
3
3
3
+connection master;
+delete from t2;
+alter table t2 add unique (a);
+drop procedure foo4;
+create procedure foo4()
+deterministic
+begin
+insert into t2 values(20),(20);
+end|
+call foo4();
+ERROR 23000: Duplicate entry '20' for key 'a'
+show warnings;
+Level Code Message
+Error 1062 Duplicate entry '20' for key 'a'
+Note 4094 At line 4 in mysqltest1.foo4
+select * from t2;
+a
+connection slave;
+select * from t2;
+a
select * from mysql.proc where name="foo4" and db='mysqltest1';
-db name type specific_name language sql_data_access is_deterministic security_type param_list returns body definer created modified sql_mode comment character_set_client collation_connection db_collation body_utf8
-mysqltest1 foo4 PROCEDURE foo4 SQL CONTAINS_SQL YES INVOKER begin
-insert into t2 values(3);
-insert into t1 values (5);
-end zedjzlcsjhd@localhost # # latin1 latin1_swedish_ci latin1_swedish_ci begin
-insert into t2 values(3);
-insert into t1 values (5);
-end
+db name type specific_name language sql_data_access is_deterministic security_type param_list returns body definer created modified sql_mode comment character_set_client collation_connection db_collation body_utf8 aggregate
+mysqltest1 foo4 PROCEDURE foo4 SQL CONTAINS_SQL YES DEFINER begin
+insert into t2 values(20),(20);
+end root@localhost # # latin1 latin1_swedish_ci latin1_swedish_ci begin
+insert into t2 values(20),(20);
+end NONE
+connection master;
drop procedure foo4;
select * from mysql.proc where name="foo4" and db='mysqltest1';
-db name type specific_name language sql_data_access is_deterministic security_type param_list returns body definer created modified sql_mode comment character_set_client collation_connection db_collation body_utf8
+db name type specific_name language sql_data_access is_deterministic security_type param_list returns body definer created modified sql_mode comment character_set_client collation_connection db_collation body_utf8 aggregate
+connection slave;
select * from mysql.proc where name="foo4" and db='mysqltest1';
-db name type specific_name language sql_data_access is_deterministic security_type param_list returns body definer created modified sql_mode comment character_set_client collation_connection db_collation body_utf8
+db name type specific_name language sql_data_access is_deterministic security_type param_list returns body definer created modified sql_mode comment character_set_client collation_connection db_collation body_utf8 aggregate
+connection master;
drop procedure foo;
drop procedure foo2;
drop procedure foo3;
@@ -143,18 +171,24 @@ fn1(20)
insert into t2 values(fn1(21));
select * from t1;
a
+15
20
21
+5
select * from t2;
a
23
+connection slave;
select * from t1;
a
+15
20
21
+5
select * from t2;
a
23
+connection master;
drop function fn1;
create function fn1()
returns int
@@ -167,6 +201,7 @@ ERROR HY000: This function has none of DETERMINISTIC, NO SQL, or READS SQL DATA
delete from t1;
set timestamp=1000000000;
insert into t1 values(fn1());
+connection con1;
create function fn2()
returns int
no sql
@@ -174,15 +209,21 @@ begin
return unix_timestamp();
end|
ERROR HY000: You do not have the SUPER privilege and binary logging is enabled (you *might* want to use the less safe log_bin_trust_function_creators variable)
+connection master;
+set @old_log_bin_trust_function_creators= @@global.log_bin_trust_function_creators;
set global log_bin_trust_function_creators=0;
set global log_bin_trust_function_creators=1;
+connection slave;
+set @old_log_bin_trust_function_creators= @@global.log_bin_trust_function_creators;
set global log_bin_trust_function_creators=1;
+connection con1;
create function fn2()
returns int
no sql
begin
return unix_timestamp();
end|
+connection master;
create function fn3()
returns int
not deterministic
@@ -194,79 +235,115 @@ select fn3();
fn3()
0
select * from mysql.proc where db='mysqltest1';
-db name type specific_name language sql_data_access is_deterministic security_type param_list returns body definer created modified sql_mode comment character_set_client collation_connection db_collation body_utf8
+db name type specific_name language sql_data_access is_deterministic security_type param_list returns body definer created modified sql_mode comment character_set_client collation_connection db_collation body_utf8 aggregate
mysqltest1 fn1 FUNCTION fn1 SQL NO_SQL NO DEFINER int(11) begin
return unix_timestamp();
end root@localhost # # latin1 latin1_swedish_ci latin1_swedish_ci begin
return unix_timestamp();
-end
+end NONE
mysqltest1 fn2 FUNCTION fn2 SQL NO_SQL NO DEFINER int(11) begin
return unix_timestamp();
-end zedjzlcsjhd@localhost # # latin1 latin1_swedish_ci latin1_swedish_ci begin
+end zedjzlcsjhd@localhost # # STRICT_TRANS_TABLES,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION latin1 latin1_swedish_ci latin1_swedish_ci begin
return unix_timestamp();
-end
+end NONE
mysqltest1 fn3 FUNCTION fn3 SQL READS_SQL_DATA NO DEFINER int(11) begin
return 0;
end root@localhost # # latin1 latin1_swedish_ci latin1_swedish_ci begin
return 0;
-end
+end NONE
select * from t1;
a
1000000000
+connection slave;
use mysqltest1;
select * from t1;
a
1000000000
select * from mysql.proc where db='mysqltest1';
-db name type specific_name language sql_data_access is_deterministic security_type param_list returns body definer created modified sql_mode comment character_set_client collation_connection db_collation body_utf8
+db name type specific_name language sql_data_access is_deterministic security_type param_list returns body definer created modified sql_mode comment character_set_client collation_connection db_collation body_utf8 aggregate
mysqltest1 fn1 FUNCTION fn1 SQL NO_SQL NO DEFINER int(11) begin
return unix_timestamp();
end root@localhost # # latin1 latin1_swedish_ci latin1_swedish_ci begin
return unix_timestamp();
-end
+end NONE
mysqltest1 fn2 FUNCTION fn2 SQL NO_SQL NO DEFINER int(11) begin
return unix_timestamp();
-end zedjzlcsjhd@localhost # # latin1 latin1_swedish_ci latin1_swedish_ci begin
+end zedjzlcsjhd@localhost # # STRICT_TRANS_TABLES,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION latin1 latin1_swedish_ci latin1_swedish_ci begin
return unix_timestamp();
-end
+end NONE
mysqltest1 fn3 FUNCTION fn3 SQL READS_SQL_DATA NO DEFINER int(11) begin
return 0;
end root@localhost # # latin1 latin1_swedish_ci latin1_swedish_ci begin
return 0;
-end
+end NONE
+connection master;
+delete from t2;
+alter table t2 add unique (a);
+Warnings:
+Note 1831 Duplicate index `a_2`. This is deprecated and will be disallowed in a future release
+drop function fn1;
+create function fn1(x int)
+returns int
+begin
+insert into t2 values(x),(x);
+return 10;
+end|
+do fn1(100);
+Warnings:
+Error 1062 Duplicate entry '100' for key 'a'
+Note 4094 At line 3 in mysqltest1.fn1
+select fn1(20);
+ERROR 23000: Duplicate entry '20' for key 'a'
+select * from t2;
+a
+connection slave;
+select * from t2;
+a
+connection con1;
create trigger trg before insert on t1 for each row set new.a= 10;
ERROR 42000: TRIGGER command denied to user 'zedjzlcsjhd'@'localhost' for table 't1'
+connection master;
delete from t1;
create trigger trg before insert on t1 for each row set new.a= 10;
insert into t1 values (1);
select * from t1;
a
10
+connection slave;
select * from t1;
a
10
+connection master;
delete from t1;
drop trigger trg;
insert into t1 values (1);
select * from t1;
a
1
+connection slave;
select * from t1;
a
1
+connection master;
create procedure foo()
not deterministic
reads sql data
select * from t1;
+connection slave;
call foo();
a
1
+connection master;
drop procedure foo;
+connection slave;
+connection master;
drop function fn1;
drop database mysqltest1;
drop user "zedjzlcsjhd"@127.0.0.1;
use test;
+connection slave;
use test;
+connection master;
drop function if exists f1;
create function f1() returns int reads sql data
begin
@@ -285,9 +362,12 @@ a
1
drop view v1;
drop function f1;
+connection slave;
+connection slave;
select * from t1;
a
1
+connection master;
DROP PROCEDURE IF EXISTS p1;
DROP TABLE IF EXISTS t1;
CREATE TABLE t1(col VARCHAR(10));
@@ -297,21 +377,23 @@ CALL p1('test');
SELECT * FROM t1;
col
test
+connection slave;
SELECT * FROM t1;
col
test
+connection master;
DROP PROCEDURE p1;
---> Test for BUG#20438
---> Preparing environment...
----> connection: master
+connection master;
DROP PROCEDURE IF EXISTS p1;
DROP FUNCTION IF EXISTS f1;
---> Synchronizing slave with master...
-
----> connection: master
+connection slave;
+connection master;
---> Creating procedure...
/*!50003 CREATE PROCEDURE p1() SET @a = 1 */;
@@ -328,7 +410,7 @@ f1 CREATE DEFINER=`root`@`localhost` FUNCTION `f1`() RETURNS int(11)
RETURN 0 latin1 latin1_swedish_ci latin1_swedish_ci
---> Synchronizing slave with master...
----> connection: master
+connection slave;
---> Checking on slave...
SHOW CREATE PROCEDURE p1;
@@ -339,17 +421,17 @@ SHOW CREATE FUNCTION f1;
Function sql_mode Create Function character_set_client collation_connection Database Collation
f1 CREATE DEFINER=`root`@`localhost` FUNCTION `f1`() RETURNS int(11)
RETURN 0 latin1 latin1_swedish_ci latin1_swedish_ci
-
----> connection: master
+connection master;
---> Cleaning up...
DROP PROCEDURE p1;
DROP FUNCTION f1;
+connection slave;
+connection master;
+connection master;
drop table t1;
-set global log_bin_trust_function_creators=0;
-set global log_bin_trust_function_creators=0;
-End of 5.0 tests
-reset master;
+connection slave;
+connection master;
drop database if exists mysqltest;
drop database if exists mysqltest2;
create database mysqltest;
@@ -360,6 +442,871 @@ create procedure mysqltest.test() begin end;
insert into t values ( 1 );
create procedure `\\`.test() begin end;
ERROR 42000: Unknown database '\\'
+connection master;
+create function f1 () returns int
+begin
+insert into t values (1);
+return 0;
+end|
+connection slave;
+connection master;
+use mysqltest;
+set @a:= mysqltest2.f1();
+connection slave;
+connection master;
+include/show_binlog_events.inc
+Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000001 # Gtid # # GTID #-#-#
+master-bin.000001 # Query # # drop database if exists mysqltest1
+master-bin.000001 # Gtid # # GTID #-#-#
+master-bin.000001 # Query # # create database mysqltest1
+master-bin.000001 # Gtid # # GTID #-#-#
+master-bin.000001 # Query # # use `mysqltest1`; create table t1 (a varchar(100))
+master-bin.000001 # Gtid # # GTID #-#-#
+master-bin.000001 # Query # # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` PROCEDURE `foo`()
+begin
+declare b int;
+set b = 8;
+insert into t1 values (b);
+insert into t1 values (unix_timestamp());
+end
+master-bin.000001 # Gtid # # BEGIN GTID #-#-#
+master-bin.000001 # Query # # use `mysqltest1`; insert into t1 values ( NAME_CONST('b',8))
+master-bin.000001 # Xid # # COMMIT /* XID */
+master-bin.000001 # Gtid # # BEGIN GTID #-#-#
+master-bin.000001 # Query # # use `mysqltest1`; insert into t1 values (unix_timestamp())
+master-bin.000001 # Xid # # COMMIT /* XID */
+master-bin.000001 # Gtid # # BEGIN GTID #-#-#
+master-bin.000001 # Query # # use `mysqltest1`; delete from t1
+master-bin.000001 # Xid # # COMMIT /* XID */
+master-bin.000001 # Gtid # # GTID #-#-#
+master-bin.000001 # Query # # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` PROCEDURE `foo2`()
+select * from mysqltest1.t1
+master-bin.000001 # Gtid # # GTID #-#-#
+master-bin.000001 # Query # # use `mysqltest1`; alter procedure foo2 contains sql
+master-bin.000001 # Gtid # # GTID #-#-#
+master-bin.000001 # Query # # use `mysqltest1`; DROP TABLE `t1` /* generated by server */
+master-bin.000001 # Gtid # # GTID #-#-#
+master-bin.000001 # Query # # use `mysqltest1`; create table t1 (a int)
+master-bin.000001 # Gtid # # GTID #-#-#
+master-bin.000001 # Query # # use `mysqltest1`; create table t2 like t1
+master-bin.000001 # Gtid # # GTID #-#-#
+master-bin.000001 # Query # # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` PROCEDURE `foo3`()
+ DETERMINISTIC
+insert into t1 values (15)
+master-bin.000001 # Gtid # # GTID #-#-#
+master-bin.000001 # Query # # use `mysqltest1`; grant CREATE ROUTINE, EXECUTE on mysqltest1.* to "zedjzlcsjhd"@127.0.0.1
+master-bin.000001 # Gtid # # GTID #-#-#
+master-bin.000001 # Query # # use `mysqltest1`; grant SELECT on mysqltest1.t1 to "zedjzlcsjhd"@127.0.0.1
+master-bin.000001 # Gtid # # GTID #-#-#
+master-bin.000001 # Query # # use `mysqltest1`; grant SELECT, INSERT on mysqltest1.t2 to "zedjzlcsjhd"@127.0.0.1
+master-bin.000001 # Gtid # # GTID #-#-#
+master-bin.000001 # Query # # use `mysqltest1`; CREATE DEFINER=`zedjzlcsjhd`@`127.0.0.1` PROCEDURE `foo4`()
+ DETERMINISTIC
+begin
+insert into t2 values(3);
+insert into t1 values (5);
+end
+master-bin.000001 # Gtid # # BEGIN GTID #-#-#
+master-bin.000001 # Query # # use `mysqltest1`; insert into t2 values(3)
+master-bin.000001 # Xid # # COMMIT /* XID */
+master-bin.000001 # Gtid # # BEGIN GTID #-#-#
+master-bin.000001 # Query # # use `mysqltest1`; insert into t1 values (15)
+master-bin.000001 # Xid # # COMMIT /* XID */
+master-bin.000001 # Gtid # # BEGIN GTID #-#-#
+master-bin.000001 # Query # # use `mysqltest1`; insert into t2 values(3)
+master-bin.000001 # Xid # # COMMIT /* XID */
+master-bin.000001 # Gtid # # GTID #-#-#
+master-bin.000001 # Query # # use `mysqltest1`; alter procedure foo4 sql security invoker
+master-bin.000001 # Gtid # # BEGIN GTID #-#-#
+master-bin.000001 # Query # # use `mysqltest1`; insert into t2 values(3)
+master-bin.000001 # Xid # # COMMIT /* XID */
+master-bin.000001 # Gtid # # BEGIN GTID #-#-#
+master-bin.000001 # Query # # use `mysqltest1`; insert into t1 values (5)
+master-bin.000001 # Xid # # COMMIT /* XID */
+master-bin.000001 # Gtid # # BEGIN GTID #-#-#
+master-bin.000001 # Query # # use `mysqltest1`; delete from t2
+master-bin.000001 # Xid # # COMMIT /* XID */
+master-bin.000001 # Gtid # # GTID #-#-#
+master-bin.000001 # Query # # use `mysqltest1`; alter table t2 add unique (a)
+master-bin.000001 # Gtid # # GTID #-#-#
+master-bin.000001 # Query # # use `mysqltest1`; drop procedure foo4
+master-bin.000001 # Gtid # # GTID #-#-#
+master-bin.000001 # Query # # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` PROCEDURE `foo4`()
+ DETERMINISTIC
+begin
+insert into t2 values(20),(20);
+end
+master-bin.000001 # Gtid # # GTID #-#-#
+master-bin.000001 # Query # # use `mysqltest1`; drop procedure foo4
+master-bin.000001 # Gtid # # GTID #-#-#
+master-bin.000001 # Query # # use `mysqltest1`; drop procedure foo
+master-bin.000001 # Gtid # # GTID #-#-#
+master-bin.000001 # Query # # use `mysqltest1`; drop procedure foo2
+master-bin.000001 # Gtid # # GTID #-#-#
+master-bin.000001 # Query # # use `mysqltest1`; drop procedure foo3
+master-bin.000001 # Gtid # # GTID #-#-#
+master-bin.000001 # Query # # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` FUNCTION `fn1`(x int) RETURNS int(11)
+ DETERMINISTIC
+begin
+insert into t1 values (x);
+return x+2;
+end
+master-bin.000001 # Gtid # # BEGIN GTID #-#-#
+master-bin.000001 # Query # # use `mysqltest1`; delete t1,t2 from t1,t2
+master-bin.000001 # Query # # COMMIT
+master-bin.000001 # Gtid # # BEGIN GTID #-#-#
+master-bin.000001 # Query # # use `mysqltest1`; SELECT `mysqltest1`.`fn1`(20)
+master-bin.000001 # Xid # # COMMIT /* XID */
+master-bin.000001 # Gtid # # BEGIN GTID #-#-#
+master-bin.000001 # Query # # use `mysqltest1`; insert into t2 values(fn1(21))
+master-bin.000001 # Xid # # COMMIT /* XID */
+master-bin.000001 # Gtid # # GTID #-#-#
+master-bin.000001 # Query # # use `mysqltest1`; drop function fn1
+master-bin.000001 # Gtid # # GTID #-#-#
+master-bin.000001 # Query # # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` FUNCTION `fn1`() RETURNS int(11)
+ NO SQL
+begin
+return unix_timestamp();
+end
+master-bin.000001 # Gtid # # BEGIN GTID #-#-#
+master-bin.000001 # Query # # use `mysqltest1`; delete from t1
+master-bin.000001 # Xid # # COMMIT /* XID */
+master-bin.000001 # Gtid # # BEGIN GTID #-#-#
+master-bin.000001 # Query # # use `mysqltest1`; insert into t1 values(fn1())
+master-bin.000001 # Xid # # COMMIT /* XID */
+master-bin.000001 # Gtid # # GTID #-#-#
+master-bin.000001 # Query # # use `mysqltest1`; CREATE DEFINER=`zedjzlcsjhd`@`127.0.0.1` FUNCTION `fn2`() RETURNS int(11)
+ NO SQL
+begin
+return unix_timestamp();
+end
+master-bin.000001 # Gtid # # GTID #-#-#
+master-bin.000001 # Query # # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` FUNCTION `fn3`() RETURNS int(11)
+ READS SQL DATA
+begin
+return 0;
+end
+master-bin.000001 # Gtid # # BEGIN GTID #-#-#
+master-bin.000001 # Query # # use `mysqltest1`; delete from t2
+master-bin.000001 # Xid # # COMMIT /* XID */
+master-bin.000001 # Gtid # # GTID #-#-#
+master-bin.000001 # Query # # use `mysqltest1`; alter table t2 add unique (a)
+master-bin.000001 # Gtid # # GTID #-#-#
+master-bin.000001 # Query # # use `mysqltest1`; drop function fn1
+master-bin.000001 # Gtid # # GTID #-#-#
+master-bin.000001 # Query # # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` FUNCTION `fn1`(x int) RETURNS int(11)
+begin
+insert into t2 values(x),(x);
+return 10;
+end
+master-bin.000001 # Gtid # # BEGIN GTID #-#-#
+master-bin.000001 # Query # # use `mysqltest1`; delete from t1
+master-bin.000001 # Xid # # COMMIT /* XID */
+master-bin.000001 # Gtid # # GTID #-#-#
+master-bin.000001 # Query # # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` trigger trg before insert on t1 for each row set new.a= 10
+master-bin.000001 # Gtid # # BEGIN GTID #-#-#
+master-bin.000001 # Query # # use `mysqltest1`; insert into t1 values (1)
+master-bin.000001 # Xid # # COMMIT /* XID */
+master-bin.000001 # Gtid # # BEGIN GTID #-#-#
+master-bin.000001 # Query # # use `mysqltest1`; delete from t1
+master-bin.000001 # Xid # # COMMIT /* XID */
+master-bin.000001 # Gtid # # GTID #-#-#
+master-bin.000001 # Query # # use `mysqltest1`; drop trigger trg
+master-bin.000001 # Gtid # # BEGIN GTID #-#-#
+master-bin.000001 # Query # # use `mysqltest1`; insert into t1 values (1)
+master-bin.000001 # Xid # # COMMIT /* XID */
+master-bin.000001 # Gtid # # GTID #-#-#
+master-bin.000001 # Query # # use `mysqltest1`; CREATE DEFINER=`root`@`localhost` PROCEDURE `foo`()
+ READS SQL DATA
+select * from t1
+master-bin.000001 # Gtid # # GTID #-#-#
+master-bin.000001 # Query # # use `mysqltest1`; drop procedure foo
+master-bin.000001 # Gtid # # GTID #-#-#
+master-bin.000001 # Query # # use `mysqltest1`; drop function fn1
+master-bin.000001 # Gtid # # GTID #-#-#
+master-bin.000001 # Query # # drop database mysqltest1
+master-bin.000001 # Gtid # # GTID #-#-#
+master-bin.000001 # Query # # drop user "zedjzlcsjhd"@127.0.0.1
+master-bin.000001 # Gtid # # GTID #-#-#
+master-bin.000001 # Query # # use `test`; drop function if exists f1
+master-bin.000001 # Gtid # # GTID #-#-#
+master-bin.000001 # Query # # use `test`; CREATE DEFINER=`root`@`localhost` FUNCTION `f1`() RETURNS int(11)
+ READS SQL DATA
+begin
+declare var integer;
+declare c cursor for select a from v1;
+open c;
+fetch c into var;
+close c;
+return var;
+end
+master-bin.000001 # Gtid # # GTID #-#-#
+master-bin.000001 # Query # # use `test`; CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select 1 as a
+master-bin.000001 # Gtid # # GTID #-#-#
+master-bin.000001 # Query # # use `test`; create table t1 (a int)
+master-bin.000001 # Gtid # # BEGIN GTID #-#-#
+master-bin.000001 # Query # # use `test`; insert into t1 (a) values (f1())
+master-bin.000001 # Xid # # COMMIT /* XID */
+master-bin.000001 # Gtid # # GTID #-#-#
+master-bin.000001 # Query # # use `test`; drop view v1
+master-bin.000001 # Gtid # # GTID #-#-#
+master-bin.000001 # Query # # use `test`; drop function f1
+master-bin.000001 # Gtid # # GTID #-#-#
+master-bin.000001 # Query # # use `test`; DROP PROCEDURE IF EXISTS p1
+master-bin.000001 # Gtid # # GTID #-#-#
+master-bin.000001 # Query # # use `test`; DROP TABLE IF EXISTS `t1` /* generated by server */
+master-bin.000001 # Gtid # # GTID #-#-#
+master-bin.000001 # Query # # use `test`; CREATE TABLE t1(col VARCHAR(10))
+master-bin.000001 # Gtid # # GTID #-#-#
+master-bin.000001 # Query # # use `test`; CREATE DEFINER=`root`@`localhost` PROCEDURE `p1`(arg VARCHAR(10))
+INSERT INTO t1 VALUES(arg)
+master-bin.000001 # Gtid # # BEGIN GTID #-#-#
+master-bin.000001 # Query # # use `test`; INSERT INTO t1 VALUES( NAME_CONST('arg',_latin1'test' COLLATE 'latin1_swedish_ci'))
+master-bin.000001 # Xid # # COMMIT /* XID */
+master-bin.000001 # Gtid # # GTID #-#-#
+master-bin.000001 # Query # # use `test`; DROP PROCEDURE p1
+master-bin.000001 # Gtid # # GTID #-#-#
+master-bin.000001 # Query # # use `test`; DROP PROCEDURE IF EXISTS p1
+master-bin.000001 # Gtid # # GTID #-#-#
+master-bin.000001 # Query # # use `test`; DROP FUNCTION IF EXISTS f1
+master-bin.000001 # Gtid # # GTID #-#-#
+master-bin.000001 # Query # # use `test`; CREATE DEFINER=`root`@`localhost` PROCEDURE `p1`()
+SET @a = 1
+master-bin.000001 # Gtid # # GTID #-#-#
+master-bin.000001 # Query # # use `test`; CREATE DEFINER=`root`@`localhost` FUNCTION `f1`() RETURNS int(11)
+RETURN 0
+master-bin.000001 # Gtid # # GTID #-#-#
+master-bin.000001 # Query # # use `test`; DROP PROCEDURE p1
+master-bin.000001 # Gtid # # GTID #-#-#
+master-bin.000001 # Query # # use `test`; DROP FUNCTION f1
+master-bin.000001 # Gtid # # GTID #-#-#
+master-bin.000001 # Query # # use `test`; DROP TABLE `t1` /* generated by server */
+master-bin.000001 # Gtid # # GTID #-#-#
+master-bin.000001 # Query # # drop database if exists mysqltest
+master-bin.000001 # Gtid # # GTID #-#-#
+master-bin.000001 # Query # # drop database if exists mysqltest2
+master-bin.000001 # Gtid # # GTID #-#-#
+master-bin.000001 # Query # # create database mysqltest
+master-bin.000001 # Gtid # # GTID #-#-#
+master-bin.000001 # Query # # create database mysqltest2
+master-bin.000001 # Gtid # # GTID #-#-#
+master-bin.000001 # Query # # use `mysqltest2`; create table t ( t integer )
+master-bin.000001 # Gtid # # GTID #-#-#
+master-bin.000001 # Query # # use `mysqltest2`; CREATE DEFINER=`root`@`localhost` PROCEDURE `mysqltest`.`test`()
+begin end
+master-bin.000001 # Gtid # # BEGIN GTID #-#-#
+master-bin.000001 # Query # # use `mysqltest2`; insert into t values ( 1 )
+master-bin.000001 # Xid # # COMMIT /* XID */
+master-bin.000001 # Gtid # # GTID #-#-#
+master-bin.000001 # Query # # use `mysqltest2`; CREATE DEFINER=`root`@`localhost` FUNCTION `f1`() RETURNS int(11)
+begin
+insert into t values (1);
+return 0;
+end
+master-bin.000001 # Gtid # # BEGIN GTID #-#-#
+master-bin.000001 # Query # # use `mysqltest`; SELECT `mysqltest2`.`f1`()
+master-bin.000001 # Xid # # COMMIT /* XID */
+connection slave;
+set @@global.log_bin_trust_function_creators= @old_log_bin_trust_function_creators;
+connection master;
+set @@global.log_bin_trust_function_creators= @old_log_bin_trust_function_creators;
drop database mysqltest;
drop database mysqltest2;
-End of 5.1 tests
+connection slave;
+connection master;
+use test;
+/*!50001 create procedure `mysqltestbug36570_p1`() */
+begin
+select 1;
+end|
+use mysql|
+create procedure test.` mysqltestbug36570_p2`(/*!50001 a int*/)`label`:
+begin
+select a;
+end|
+/*!50001 create function test.mysqltestbug36570_f1() */
+returns int
+/*!50001 deterministic */
+begin
+return 3;
+end|
+use test|
+show procedure status like '%mysqltestbug36570%';
+Db Name Type Definer Modified Created Security_type Comment character_set_client collation_connection Database Collation
+test mysqltestbug36570_p2 PROCEDURE root@localhost t t DEFINER latin1 latin1_swedish_ci latin1_swedish_ci
+test mysqltestbug36570_p1 PROCEDURE root@localhost t t DEFINER latin1 latin1_swedish_ci latin1_swedish_ci
+show create procedure ` mysqltestbug36570_p2`;
+Procedure sql_mode Create Procedure character_set_client collation_connection Database Collation
+ mysqltestbug36570_p2 CREATE DEFINER=`root`@`localhost` PROCEDURE ` mysqltestbug36570_p2`( a int)
+`label`:
+begin
+select a;
+end latin1 latin1_swedish_ci latin1_swedish_ci
+connection slave;
+connection slave;
+show procedure status like '%mysqltestbug36570%';
+Db Name Type Definer Modified Created Security_type Comment character_set_client collation_connection Database Collation
+test mysqltestbug36570_p2 PROCEDURE root@localhost t t DEFINER latin1 latin1_swedish_ci latin1_swedish_ci
+test mysqltestbug36570_p1 PROCEDURE root@localhost t t DEFINER latin1 latin1_swedish_ci latin1_swedish_ci
+show create procedure ` mysqltestbug36570_p2`;
+Procedure sql_mode Create Procedure character_set_client collation_connection Database Collation
+ mysqltestbug36570_p2 CREATE DEFINER=`root`@`localhost` PROCEDURE ` mysqltestbug36570_p2`( a int)
+`label`:
+begin
+select a;
+end latin1 latin1_swedish_ci latin1_swedish_ci
+call ` mysqltestbug36570_p2`(42);
+a
+42
+show function status like '%mysqltestbug36570%';
+Db Name Type Definer Modified Created Security_type Comment character_set_client collation_connection Database Collation
+test mysqltestbug36570_f1 FUNCTION root@localhost t t DEFINER latin1 latin1_swedish_ci latin1_swedish_ci
+connection master;
+flush logs;
+/*!50530 SET @@SESSION.PSEUDO_SLAVE_MODE=1*/;
+/*!40019 SET @@session.max_insert_delayed_threads=0*/;
+/*!50003 SET @OLD_COMPLETION_TYPE=@@COMPLETION_TYPE,COMPLETION_TYPE=0*/;
+DELIMITER /*!*/;
+ROLLBACK/*!*/;
+SET TIMESTAMP=t/*!*/;
+SET @@session.pseudo_thread_id=999999999/*!*/;
+SET @@session.foreign_key_checks=1, @@session.sql_auto_is_null=0, @@session.unique_checks=1, @@session.autocommit=1, @@session.check_constraint_checks=1/*!*/;
+SET @@session.sql_mode=0/*!*/;
+SET @@session.auto_increment_increment=1, @@session.auto_increment_offset=1/*!*/;
+/*!\C latin1 *//*!*/;
+SET @@session.character_set_client=8,@@session.collation_connection=8,@@session.collation_server=8/*!*/;
+SET @@session.lc_time_names=0/*!*/;
+SET @@session.collation_database=DEFAULT/*!*/;
+drop database if exists mysqltest1
+/*!*/;
+SET TIMESTAMP=t/*!*/;
+create database mysqltest1
+/*!*/;
+use `mysqltest1`/*!*/;
+SET TIMESTAMP=t/*!*/;
+create table t1 (a varchar(100))
+/*!*/;
+SET TIMESTAMP=t/*!*/;
+CREATE DEFINER=`root`@`localhost` PROCEDURE `foo`()
+begin
+declare b int;
+set b = 8;
+insert into t1 values (b);
+insert into t1 values (unix_timestamp());
+end
+/*!*/;
+START TRANSACTION
+/*!*/;
+SET TIMESTAMP=t/*!*/;
+insert into t1 values ( NAME_CONST('b',8))
+/*!*/;
+COMMIT/*!*/;
+START TRANSACTION
+/*!*/;
+SET TIMESTAMP=t/*!*/;
+insert into t1 values (unix_timestamp())
+/*!*/;
+COMMIT/*!*/;
+START TRANSACTION
+/*!*/;
+SET TIMESTAMP=t/*!*/;
+delete from t1
+/*!*/;
+COMMIT/*!*/;
+SET TIMESTAMP=t/*!*/;
+CREATE DEFINER=`root`@`localhost` PROCEDURE `foo2`()
+select * from mysqltest1.t1
+/*!*/;
+SET TIMESTAMP=t/*!*/;
+alter procedure foo2 contains sql
+/*!*/;
+SET TIMESTAMP=t/*!*/;
+DROP TABLE `t1` /* generated by server */
+/*!*/;
+SET TIMESTAMP=t/*!*/;
+create table t1 (a int)
+/*!*/;
+SET TIMESTAMP=t/*!*/;
+create table t2 like t1
+/*!*/;
+SET TIMESTAMP=t/*!*/;
+CREATE DEFINER=`root`@`localhost` PROCEDURE `foo3`()
+ DETERMINISTIC
+insert into t1 values (15)
+/*!*/;
+SET TIMESTAMP=t/*!*/;
+grant CREATE ROUTINE, EXECUTE on mysqltest1.* to "zedjzlcsjhd"@127.0.0.1
+/*!*/;
+SET TIMESTAMP=t/*!*/;
+grant SELECT on mysqltest1.t1 to "zedjzlcsjhd"@127.0.0.1
+/*!*/;
+SET TIMESTAMP=t/*!*/;
+grant SELECT, INSERT on mysqltest1.t2 to "zedjzlcsjhd"@127.0.0.1
+/*!*/;
+SET TIMESTAMP=t/*!*/;
+SET @@session.sql_mode=1411383296/*!*/;
+CREATE DEFINER=`zedjzlcsjhd`@`127.0.0.1` PROCEDURE `foo4`()
+ DETERMINISTIC
+begin
+insert into t2 values(3);
+insert into t1 values (5);
+end
+/*!*/;
+START TRANSACTION
+/*!*/;
+SET TIMESTAMP=t/*!*/;
+insert into t2 values(3)
+/*!*/;
+COMMIT/*!*/;
+START TRANSACTION
+/*!*/;
+SET TIMESTAMP=t/*!*/;
+SET @@session.sql_mode=0/*!*/;
+insert into t1 values (15)
+/*!*/;
+COMMIT/*!*/;
+START TRANSACTION
+/*!*/;
+SET TIMESTAMP=t/*!*/;
+SET @@session.sql_mode=1411383296/*!*/;
+insert into t2 values(3)
+/*!*/;
+COMMIT/*!*/;
+SET TIMESTAMP=t/*!*/;
+SET @@session.sql_mode=0/*!*/;
+alter procedure foo4 sql security invoker
+/*!*/;
+START TRANSACTION
+/*!*/;
+SET TIMESTAMP=t/*!*/;
+SET @@session.sql_mode=1411383296/*!*/;
+insert into t2 values(3)
+/*!*/;
+COMMIT/*!*/;
+START TRANSACTION
+/*!*/;
+SET TIMESTAMP=t/*!*/;
+insert into t1 values (5)
+/*!*/;
+COMMIT/*!*/;
+START TRANSACTION
+/*!*/;
+SET TIMESTAMP=t/*!*/;
+SET @@session.sql_mode=0/*!*/;
+delete from t2
+/*!*/;
+COMMIT/*!*/;
+SET TIMESTAMP=t/*!*/;
+alter table t2 add unique (a)
+/*!*/;
+SET TIMESTAMP=t/*!*/;
+drop procedure foo4
+/*!*/;
+SET TIMESTAMP=t/*!*/;
+CREATE DEFINER=`root`@`localhost` PROCEDURE `foo4`()
+ DETERMINISTIC
+begin
+insert into t2 values(20),(20);
+end
+/*!*/;
+SET TIMESTAMP=t/*!*/;
+drop procedure foo4
+/*!*/;
+SET TIMESTAMP=t/*!*/;
+drop procedure foo
+/*!*/;
+SET TIMESTAMP=t/*!*/;
+drop procedure foo2
+/*!*/;
+SET TIMESTAMP=t/*!*/;
+drop procedure foo3
+/*!*/;
+SET TIMESTAMP=t/*!*/;
+CREATE DEFINER=`root`@`localhost` FUNCTION `fn1`(x int) RETURNS int(11)
+ DETERMINISTIC
+begin
+insert into t1 values (x);
+return x+2;
+end
+/*!*/;
+START TRANSACTION
+/*!*/;
+SET TIMESTAMP=t/*!*/;
+delete t1,t2 from t1,t2
+/*!*/;
+SET TIMESTAMP=t/*!*/;
+COMMIT
+/*!*/;
+START TRANSACTION
+/*!*/;
+SET TIMESTAMP=t/*!*/;
+SELECT `mysqltest1`.`fn1`(20)
+/*!*/;
+COMMIT/*!*/;
+START TRANSACTION
+/*!*/;
+SET TIMESTAMP=t/*!*/;
+insert into t2 values(fn1(21))
+/*!*/;
+COMMIT/*!*/;
+SET TIMESTAMP=t/*!*/;
+drop function fn1
+/*!*/;
+SET TIMESTAMP=t/*!*/;
+CREATE DEFINER=`root`@`localhost` FUNCTION `fn1`() RETURNS int(11)
+ NO SQL
+begin
+return unix_timestamp();
+end
+/*!*/;
+START TRANSACTION
+/*!*/;
+SET TIMESTAMP=t/*!*/;
+delete from t1
+/*!*/;
+COMMIT/*!*/;
+START TRANSACTION
+/*!*/;
+SET TIMESTAMP=t/*!*/;
+insert into t1 values(fn1())
+/*!*/;
+COMMIT/*!*/;
+SET TIMESTAMP=t/*!*/;
+SET @@session.sql_mode=1411383296/*!*/;
+CREATE DEFINER=`zedjzlcsjhd`@`127.0.0.1` FUNCTION `fn2`() RETURNS int(11)
+ NO SQL
+begin
+return unix_timestamp();
+end
+/*!*/;
+SET TIMESTAMP=t/*!*/;
+SET @@session.sql_mode=0/*!*/;
+CREATE DEFINER=`root`@`localhost` FUNCTION `fn3`() RETURNS int(11)
+ READS SQL DATA
+begin
+return 0;
+end
+/*!*/;
+START TRANSACTION
+/*!*/;
+SET TIMESTAMP=t/*!*/;
+delete from t2
+/*!*/;
+COMMIT/*!*/;
+SET TIMESTAMP=t/*!*/;
+alter table t2 add unique (a)
+/*!*/;
+SET TIMESTAMP=t/*!*/;
+drop function fn1
+/*!*/;
+SET TIMESTAMP=t/*!*/;
+CREATE DEFINER=`root`@`localhost` FUNCTION `fn1`(x int) RETURNS int(11)
+begin
+insert into t2 values(x),(x);
+return 10;
+end
+/*!*/;
+START TRANSACTION
+/*!*/;
+SET TIMESTAMP=t/*!*/;
+delete from t1
+/*!*/;
+COMMIT/*!*/;
+SET TIMESTAMP=t/*!*/;
+CREATE DEFINER=`root`@`localhost` trigger trg before insert on t1 for each row set new.a= 10
+/*!*/;
+START TRANSACTION
+/*!*/;
+SET TIMESTAMP=t/*!*/;
+insert into t1 values (1)
+/*!*/;
+COMMIT/*!*/;
+START TRANSACTION
+/*!*/;
+SET TIMESTAMP=t/*!*/;
+delete from t1
+/*!*/;
+COMMIT/*!*/;
+SET TIMESTAMP=t/*!*/;
+drop trigger trg
+/*!*/;
+START TRANSACTION
+/*!*/;
+SET TIMESTAMP=t/*!*/;
+insert into t1 values (1)
+/*!*/;
+COMMIT/*!*/;
+SET TIMESTAMP=t/*!*/;
+CREATE DEFINER=`root`@`localhost` PROCEDURE `foo`()
+ READS SQL DATA
+select * from t1
+/*!*/;
+SET TIMESTAMP=t/*!*/;
+drop procedure foo
+/*!*/;
+SET TIMESTAMP=t/*!*/;
+drop function fn1
+/*!*/;
+SET TIMESTAMP=t/*!*/;
+drop database mysqltest1
+/*!*/;
+SET TIMESTAMP=t/*!*/;
+drop user "zedjzlcsjhd"@127.0.0.1
+/*!*/;
+use `test`/*!*/;
+SET TIMESTAMP=t/*!*/;
+drop function if exists f1
+/*!*/;
+SET TIMESTAMP=t/*!*/;
+CREATE DEFINER=`root`@`localhost` FUNCTION `f1`() RETURNS int(11)
+ READS SQL DATA
+begin
+declare var integer;
+declare c cursor for select a from v1;
+open c;
+fetch c into var;
+close c;
+return var;
+end
+/*!*/;
+SET TIMESTAMP=t/*!*/;
+CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select 1 as a
+/*!*/;
+SET TIMESTAMP=t/*!*/;
+create table t1 (a int)
+/*!*/;
+START TRANSACTION
+/*!*/;
+SET TIMESTAMP=t/*!*/;
+insert into t1 (a) values (f1())
+/*!*/;
+COMMIT/*!*/;
+SET TIMESTAMP=t/*!*/;
+drop view v1
+/*!*/;
+SET TIMESTAMP=t/*!*/;
+drop function f1
+/*!*/;
+SET TIMESTAMP=t/*!*/;
+DROP PROCEDURE IF EXISTS p1
+/*!*/;
+SET TIMESTAMP=t/*!*/;
+DROP TABLE IF EXISTS `t1` /* generated by server */
+/*!*/;
+SET TIMESTAMP=t/*!*/;
+CREATE TABLE t1(col VARCHAR(10))
+/*!*/;
+SET TIMESTAMP=t/*!*/;
+CREATE DEFINER=`root`@`localhost` PROCEDURE `p1`(arg VARCHAR(10))
+INSERT INTO t1 VALUES(arg)
+/*!*/;
+START TRANSACTION
+/*!*/;
+SET TIMESTAMP=t/*!*/;
+INSERT INTO t1 VALUES( NAME_CONST('arg',_latin1'test' COLLATE 'latin1_swedish_ci'))
+/*!*/;
+COMMIT/*!*/;
+SET TIMESTAMP=t/*!*/;
+DROP PROCEDURE p1
+/*!*/;
+SET TIMESTAMP=t/*!*/;
+DROP PROCEDURE IF EXISTS p1
+/*!*/;
+SET TIMESTAMP=t/*!*/;
+DROP FUNCTION IF EXISTS f1
+/*!*/;
+SET TIMESTAMP=t/*!*/;
+CREATE DEFINER=`root`@`localhost` PROCEDURE `p1`()
+SET @a = 1
+/*!*/;
+SET TIMESTAMP=t/*!*/;
+CREATE DEFINER=`root`@`localhost` FUNCTION `f1`() RETURNS int(11)
+RETURN 0
+/*!*/;
+SET TIMESTAMP=t/*!*/;
+DROP PROCEDURE p1
+/*!*/;
+SET TIMESTAMP=t/*!*/;
+DROP FUNCTION f1
+/*!*/;
+SET TIMESTAMP=t/*!*/;
+DROP TABLE `t1` /* generated by server */
+/*!*/;
+SET TIMESTAMP=t/*!*/;
+drop database if exists mysqltest
+/*!*/;
+SET TIMESTAMP=t/*!*/;
+drop database if exists mysqltest2
+/*!*/;
+SET TIMESTAMP=t/*!*/;
+create database mysqltest
+/*!*/;
+SET TIMESTAMP=t/*!*/;
+create database mysqltest2
+/*!*/;
+use `mysqltest2`/*!*/;
+SET TIMESTAMP=t/*!*/;
+create table t ( t integer )
+/*!*/;
+SET TIMESTAMP=t/*!*/;
+CREATE DEFINER=`root`@`localhost` PROCEDURE `mysqltest`.`test`()
+begin end
+/*!*/;
+START TRANSACTION
+/*!*/;
+SET TIMESTAMP=t/*!*/;
+insert into t values ( 1 )
+/*!*/;
+COMMIT/*!*/;
+SET TIMESTAMP=t/*!*/;
+CREATE DEFINER=`root`@`localhost` FUNCTION `f1`() RETURNS int(11)
+begin
+insert into t values (1);
+return 0;
+end
+/*!*/;
+START TRANSACTION
+/*!*/;
+use `mysqltest`/*!*/;
+SET TIMESTAMP=t/*!*/;
+SELECT `mysqltest2`.`f1`()
+/*!*/;
+COMMIT/*!*/;
+SET TIMESTAMP=t/*!*/;
+drop database mysqltest
+/*!*/;
+SET TIMESTAMP=t/*!*/;
+drop database mysqltest2
+/*!*/;
+use `test`/*!*/;
+SET TIMESTAMP=t/*!*/;
+CREATE DEFINER=`root`@`localhost` PROCEDURE `mysqltestbug36570_p1`()
+begin
+select 1;
+end
+/*!*/;
+use `mysql`/*!*/;
+SET TIMESTAMP=t/*!*/;
+CREATE DEFINER=`root`@`localhost` PROCEDURE `test`.` mysqltestbug36570_p2`( a int)
+`label`:
+begin
+select a;
+end
+/*!*/;
+SET TIMESTAMP=t/*!*/;
+CREATE DEFINER=`root`@`localhost` FUNCTION `test`.`mysqltestbug36570_f1`() RETURNS int(11)
+ DETERMINISTIC
+begin
+return 3;
+end
+/*!*/;
+DELIMITER ;
+# End of log file
+ROLLBACK /* added by mysqlbinlog */;
+/*!50003 SET COMPLETION_TYPE=@OLD_COMPLETION_TYPE*/;
+/*!50530 SET @@SESSION.PSEUDO_SLAVE_MODE=0*/;
+use test;
+drop procedure mysqltestbug36570_p1;
+drop procedure ` mysqltestbug36570_p2`;
+drop function mysqltestbug36570_f1;
+End of 5.0 tests
+# End of 5.1 tests
+#
+# Test Bug#30977 Concurrent statement using stored
+# function and DROP FUNCTION breaks SBR.
+#
+# Demonstrate that stored function DDL cannot go through,
+# or, worse yet, make its way into the binary log, while
+# the stored function is in use.
+# For that, try to insert a result of a stored function
+# into a table. Block the insert in the beginning, waiting
+# on a table lock. While insert is blocked, attempt to
+# drop the routine. Verify that this attempt
+# blocks and waits for INSERT to complete. Commit and
+# reap the chain of events. Master and slave must contain
+# identical data. Statements in the binary log must be
+# consistent with data in the table.
+#
+connection default;
+drop table if exists t1, t2;
+drop function if exists t1;
+create table t1 (a int);
+create table t2 (a int) as select 1 as a;
+create function f1() returns int deterministic return (select max(a) from t2);
+lock table t2 write;
+connection master;
+# Sending 'insert into t1 (a) values (f1())'...
+insert into t1 (a) values (f1());
+connection master1;
+# Waiting for 'insert into t1 ...' to get blocked on table lock...
+# Sending 'drop function f1'. It will wait till insert finishes.
+drop function f1;;
+connection default;
+# Check that 'drop function f1' gets blocked.
+# Now let's let 'insert' go through...
+unlock tables;
+connection master;
+# Reaping 'insert into t1 (a) values (f1())'...
+connection master1;
+# Reaping 'drop function f1'
+connection master;
+select * from t1;
+a
+1
+connection slave;
+connection slave;
+select * from t1;
+a
+1
+connection master;
+drop table t1, t2;
+drop function f1;
+ERROR 42000: FUNCTION test.f1 does not exist
+#
+# Bug #11918 Can't use a declared variable in LIMIT clause
+#
+include/rpl_reset.inc
+create table t1 (c1 int);
+insert into t1 (c1) values
+(1), (2), (3), (4), (5), (6), (7), (8), (9), (10);
+call mtr.add_suppression("Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT");
+create procedure p1(p1 integer)
+delete from t1 limit p1;
+set @save_binlog_format=@@session.binlog_format;
+set @@session.binlog_format=STATEMENT;
+call p1(NULL);
+call p1(0);
+call p1(1);
+call p1(2);
+call p1(3);
+select * from t1;
+c1
+7
+8
+9
+10
+connection slave;
+connection slave;
+select * from t1;
+c1
+7
+8
+9
+10
+connection master;
+call p1(-1);
+select * from t1;
+c1
+connection slave;
+connection slave;
+select * from t1;
+c1
+connection master;
+# Cleanup
+set @@session.binlog_format=@save_binlog_format;
+drop table t1;
+drop procedure p1;
+# End of 5.5 tests.
+connection slave;
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/r/rpl_sp004.result b/mysql-test/suite/engines/funcs/r/rpl_sp004.result
index 1c0ed3cc50a..72f0428ce0f 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_sp004.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_sp004.result
@@ -1,9 +1,6 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
+include/master-slave.inc
+[connection master]
+connection master;
DROP PROCEDURE IF EXISTS test.p1;
DROP PROCEDURE IF EXISTS test.p2;
DROP TABLE IF EXISTS test.t2;
@@ -38,6 +35,7 @@ a
2
3
8
+connection slave;
SELECT * FROM test.t1 ORDER BY a;
a
1
@@ -50,15 +48,18 @@ a
2
3
8
+connection master;
CALL test.p2();
USE test;
SHOW TABLES;
Tables_in_test
t3
+connection slave;
USE test;
SHOW TABLES;
Tables_in_test
t3
+connection master;
CALL test.p1();
Warnings:
Note 1050 Table 't3' already exists
@@ -74,6 +75,7 @@ a
2
3
8
+connection slave;
SELECT * FROM test.t1 ORDER BY a;
a
1
@@ -86,8 +88,11 @@ a
2
3
8
+connection master;
DROP PROCEDURE IF EXISTS test.p1;
DROP PROCEDURE IF EXISTS test.p2;
DROP TABLE IF EXISTS test.t1;
DROP TABLE IF EXISTS test.t2;
DROP TABLE IF EXISTS test.t3;
+connection slave;
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/r/rpl_sp_effects.result b/mysql-test/suite/engines/funcs/r/rpl_sp_effects.result
index 97cc0a78a23..d6890f22b02 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_sp_effects.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_sp_effects.result
@@ -1,9 +1,7 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
+include/master-slave.inc
+[connection master]
+connection master;
+SET @old_log_bin_trust_function_creators= @@global.log_bin_trust_function_creators;
drop procedure if exists p1;
drop procedure if exists p2;
drop function if exists f1;
@@ -20,6 +18,8 @@ set spv=spv+1;
end while;
end//
call p1();
+connection slave;
+connection slave;
SELECT * FROM t1 ORDER BY a;
a
1
@@ -27,6 +27,7 @@ a
3
4
5
+connection master;
SELECT * FROM t1 ORDER BY a;
a
1
@@ -43,9 +44,12 @@ call p2();
SELECT * FROM t2 ORDER BY a;
a
4
+connection slave;
+connection slave;
SELECT * FROM t2 ORDER BY a;
a
4
+connection master;
drop procedure p1;
drop procedure p2;
drop table t2;
@@ -104,6 +108,8 @@ master: 7
master: 8
master: 10
master: 11
+connection slave;
+connection slave;
SELECT 'slave:',a FROM t1 ORDER BY a;
slave: a
slave: 1
@@ -120,6 +126,7 @@ slave: 7
slave: 8
slave: 10
slave: 11
+connection master;
drop procedure p1;
delete from t1;
delete from t2;
@@ -134,10 +141,13 @@ SELECT 'master:',a FROM t1 ORDER BY a;
master: a
master: 1
master: 2
+connection slave;
+connection slave;
SELECT 'slave:',a FROM t1 ORDER BY a;
slave: a
slave: 1
slave: 2
+connection master;
drop view v1;
delete from t1;
prepare s1 from 'select f1(?)';
@@ -148,9 +158,12 @@ f1(?)
SELECT 'master:',a FROM t1 ORDER BY a;
master: a
master: 123
+connection slave;
+connection slave;
SELECT 'slave:',a FROM t1 ORDER BY a;
slave: a
slave: 123
+connection master;
delete from t1;
create procedure p1(spv int)
begin
@@ -169,12 +182,15 @@ master: 6
master: 6
master: 15
master: 15
+connection slave;
+connection slave;
SELECT 'slave:',a FROM t1 ORDER BY a;
slave: a
slave: 6
slave: 6
slave: 15
slave: 15
+connection master;
drop procedure p1;
drop function f1;
drop table t1,t2;
@@ -220,6 +236,8 @@ master 100
master 101
master 101
master 102
+connection slave;
+connection slave;
SELECT 'slave', a FROM t1 ORDER BY a;
slave a
slave 10
@@ -231,7 +249,73 @@ slave 100
slave 101
slave 101
slave 102
+connection master;
drop table t1;
drop function f1;
drop function f2;
drop procedure p1;
+connection slave;
+connection master;
+create table t2 (b BIT(7));
+create procedure sp_bug26199(bitvalue BIT(7))
+begin
+insert into t2 set b = bitvalue;
+end //
+create function sf_bug26199(b BIT(7)) returns int
+begin
+insert into t2 values(b);
+return 0;
+end//
+call sp_bug26199(b'1110');
+call sp_bug26199('\0');
+select sf_bug26199(b'1111111');
+sf_bug26199(b'1111111')
+0
+SET STATEMENT sql_mode = '' FOR
+select sf_bug26199(b'101111111');
+sf_bug26199(b'101111111')
+0
+Warnings:
+Warning 1264 Out of range value for column 'b' at row 1
+select sf_bug26199('\'');
+sf_bug26199('\'')
+0
+select hex(b) from t2;
+hex(b)
+E
+0
+7F
+7F
+27
+connection slave;
+select hex(b) from t2;
+hex(b)
+E
+0
+7F
+7F
+27
+connection master;
+drop table t2;
+drop procedure sp_bug26199;
+drop function sf_bug26199;
+SET @@global.log_bin_trust_function_creators= @old_log_bin_trust_function_creators;
+connection slave;
+set names utf8;
+CREATE FUNCTION f() RETURNS timestamp DETERMINISTIC
+BEGIN RETURN '2012-12-21 12:12:12'; END |
+CREATE PROCEDURE p(t timestamp)
+BEGIN
+SET @t = t;
+PREPARE stmt FROM "
+ UPDATE t1 SET a = @t WHERE '2012-12-31 08:00:00' < f() ";
+EXECUTE stmt;
+DEALLOCATE PREPARE stmt;
+END |
+create table t1 (a timestamp);
+call p('2012-12-31 08:00:00');
+drop table t1;
+drop procedure p;
+drop function f;
+end of the tests
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/r/rpl_start_stop_slave.result b/mysql-test/suite/engines/funcs/r/rpl_start_stop_slave.result
index 1fcb586d1fb..48c5e548fd2 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_start_stop_slave.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_start_stop_slave.result
@@ -1,12 +1,17 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
-stop slave;
+include/master-slave.inc
+[connection master]
+connection slave;
+include/stop_slave.inc
+connection master;
create table t1(n int);
-start slave;
-stop slave io_thread;
+connection slave;
+include/start_slave.inc
+include/stop_slave_io.inc
start slave io_thread;
+include/wait_for_slave_io_to_start.inc
+connection master;
+connection slave;
+connection master;
drop table t1;
+connection slave;
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/r/rpl_stm_max_relay_size.result b/mysql-test/suite/engines/funcs/r/rpl_stm_max_relay_size.result
index 547dd8e1541..41d18d7f9fe 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_stm_max_relay_size.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_stm_max_relay_size.result
@@ -1,20 +1,19 @@
+include/master-slave.inc
+[connection master]
+connection slave;
stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
-stop slave;
+connection master;
#
# Generate a big enough master's binlog to cause relay log rotations
#
create table t1 (a int);
drop table t1;
+connection slave;
reset slave;
#
# Test 1
#
-set @my_max_binlog_size= @@global.max_binlog_size;
+set @my_max_binlog_size= @@global.max_binlog_size, @my_max_relay_log_size=@@global.max_relay_log_size;
set global max_binlog_size=8192;
set global max_relay_log_size=8192-1;
Warnings:
@@ -23,7 +22,7 @@ select @@global.max_relay_log_size;
@@global.max_relay_log_size
4096
start slave;
-Checking that both slave threads are running.
+include/check_slave_is_running.inc
#
# Test 2
#
@@ -33,17 +32,19 @@ set global max_relay_log_size=(5*4096);
select @@global.max_relay_log_size;
@@global.max_relay_log_size 20480
start slave;
-Checking that both slave threads are running.
+include/check_slave_is_running.inc
#
# Test 3: max_relay_log_size = 0
#
stop slave;
reset slave;
set global max_relay_log_size=0;
+Warnings:
+Warning 1292 Truncated incorrect max_relay_log_size value: '0'
select @@global.max_relay_log_size;
-@@global.max_relay_log_size 0
+@@global.max_relay_log_size 4096
start slave;
-Checking that both slave threads are running.
+include/check_slave_is_running.inc
#
# Test 4: Tests below are mainly to ensure that we have not coded with wrong assumptions
#
@@ -56,19 +57,27 @@ flush logs;
reset slave;
start slave;
flush logs;
+connection master;
create table t1 (a int);
-Checking that both slave threads are running.
+connection slave;
+include/check_slave_is_running.inc
#
# Test 6: one more rotation, to be sure Relay_Log_Space is correctly updated
#
flush logs;
+connection master;
drop table t1;
-Checking that both slave threads are running.
+connection slave;
+include/check_slave_is_running.inc
+connection master;
flush logs;
show master status;
File Position Binlog_Do_DB Binlog_Ignore_DB
master-bin.000002 # <Binlog_Do_DB> <Binlog_Ignore_DB>
+connection slave;
set global max_binlog_size= @my_max_binlog_size;
+set global max_relay_log_size= @my_max_relay_log_size;
#
# End of 4.1 tests
#
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/r/rpl_stm_mystery22.result b/mysql-test/suite/engines/funcs/r/rpl_stm_mystery22.result
index ea34b308ec2..0d99235e45c 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_stm_mystery22.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_stm_mystery22.result
@@ -1,20 +1,20 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
+include/master-slave.inc
+[connection master]
create table t1(n int auto_increment primary key, s char(10));
+connection slave;
insert into t1 values (2,'old');
+connection master;
insert into t1 values(NULL,'new');
insert into t1 values(NULL,'new');
+connection slave;
select * from t1 order by n;
n s
1 new
2 old
delete from t1 where n = 2;
-start slave;
-stop slave;
+include/start_slave.inc
+include/stop_slave.inc
+connection master;
create table t2(n int);
drop table t2;
insert into t1 values(NULL,'new');
@@ -22,10 +22,14 @@ set sql_log_bin=0;
insert into t1 values(NULL,'new');
set sql_log_bin=1;
delete from t1 where n=4;
-start slave;
+connection slave;
+include/start_slave.inc
select * from t1 order by n;
n s
1 new
2 new
3 new
+connection master;
drop table t1;
+connection slave;
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/r/rpl_stm_no_op.result b/mysql-test/suite/engines/funcs/r/rpl_stm_no_op.result
index 5a253d61fcb..2505660f863 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_stm_no_op.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_stm_no_op.result
@@ -1,52 +1,71 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
+include/master-slave.inc
+[connection master]
+connection slave;
create database mysqltest;
+connection master;
drop database if exists mysqltest;
Warnings:
Note 1008 Can't drop database 'mysqltest'; database doesn't exist
+connection slave;
show tables from mysqltest;
ERROR 42000: Unknown database 'mysqltest'
+connection slave;
create table t1 (a int);
+connection master;
drop table if exists t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'test.t1'
+connection slave;
select * from t1;
ERROR 42S02: Table 'test.t1' doesn't exist
+connection master;
create table t1 (a int, b int);
+connection slave;
insert into t1 values(1,1);
+connection master;
delete from t1;
+connection slave;
select * from t1;
a b
insert into t1 values(1,1);
+connection master;
insert into t1 values(2,1);
update t1 set a=2;
+connection slave;
select * from t1;
a b
2 1
2 1
+connection master;
create table t2 (a int, b int);
delete from t1;
insert into t1 values(1,1);
insert into t2 values(1,1);
+connection slave;
update t1 set a=2;
+connection master;
UPDATE t1, t2 SET t1.a = t2.a;
+connection slave;
select * from t1;
a b
1 1
select * from t2;
a b
1 1
+connection master;
delete from t1;
delete from t2;
+connection slave;
insert into t1 values(1,1);
insert into t2 values(1,1);
+connection master;
DELETE t1.*, t2.* from t1, t2;
+connection slave;
select * from t1;
a b
select * from t2;
a b
+connection master;
drop table t1, t2;
+connection slave;
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/r/rpl_stm_reset_slave.result b/mysql-test/suite/engines/funcs/r/rpl_stm_reset_slave.result
index c6c2c525098..1ba2d1b624b 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_stm_reset_slave.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_stm_reset_slave.result
@@ -1,41 +1,58 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
-Master_User root
-Master_Host 127.0.0.1
+include/master-slave.inc
+[connection master]
+connection slave;
+Master_User = 'root'
+Master_Host = '127.0.0.1'
include/stop_slave.inc
change master to master_user='test';
-Master_User test
-Master_Host 127.0.0.1
+Master_User = 'test'
+Master_Host = '127.0.0.1'
reset slave;
-Master_User test
-Master_Host 127.0.0.1
+Master_User = 'test'
+Master_Host = '127.0.0.1'
change master to master_user='root';
include/start_slave.inc
-Master_User root
-Master_Host 127.0.0.1
+Master_User = 'root'
+Master_Host = '127.0.0.1'
include/stop_slave.inc
reset slave;
include/start_slave.inc
+connection master;
create temporary table t1 (a int);
+connection slave;
include/stop_slave.inc
reset slave;
include/start_slave.inc
show status like 'slave_open_temp_tables';
Variable_name Value
Slave_open_temp_tables 1
+connection master;
+drop temporary table if exists t1;
+connection slave;
include/stop_slave.inc
reset slave;
+include/check_slave_no_error.inc
change master to master_user='impossible_user_name';
start slave;
-stop slave;
+include/wait_for_slave_io_error.inc [errno=1045]
+include/stop_slave_sql.inc
change master to master_user='root';
include/start_slave.inc
+include/check_slave_no_error.inc
stop slave;
change master to master_user='impossible_user_name';
start slave;
-stop slave;
+include/wait_for_slave_io_error.inc [errno=1045]
+include/stop_slave_sql.inc
reset slave;
+include/check_slave_no_error.inc
+change master to master_user='root';
+reset slave;
+include/start_slave.inc
+include/stop_slave.inc
+reset slave all;
+start slave;
+ERROR HY000: Misconfigured slave: MASTER_HOST was not set; Fix in config file or with CHANGE MASTER TO
+CHANGE MASTER TO MASTER_HOST= 'MASTER_HOST', MASTER_USER= 'MASTER_USER', MASTER_PORT= MASTER_PORT;
+include/start_slave.inc
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/r/rpl_switch_stm_row_mixed.result b/mysql-test/suite/engines/funcs/r/rpl_switch_stm_row_mixed.result
index b0d92f3ca3f..936f604be2e 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_switch_stm_row_mixed.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_switch_stm_row_mixed.result
@@ -1,15 +1,36 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
-set @saved_binlog_format = @@global.binlog_format;
+include/master-slave.inc
+[connection master]
+connection slave;
+connection master;
drop database if exists mysqltest1;
create database mysqltest1;
use mysqltest1;
+set @my_binlog_format= @@global.binlog_format;
+set session binlog_format=mixed;
+show session variables like "binlog_format%";
+Variable_name Value
+binlog_format MIXED
+set session binlog_format=statement;
+show session variables like "binlog_format%";
+Variable_name Value
+binlog_format STATEMENT
set session binlog_format=row;
-set global binlog_format=row;
+show session variables like "binlog_format%";
+Variable_name Value
+binlog_format ROW
+set global binlog_format=DEFAULT;
+show global variables like "binlog_format%";
+Variable_name Value
+binlog_format MIXED
+set global binlog_format=MIXED;
+show global variables like "binlog_format%";
+Variable_name Value
+binlog_format MIXED
+set global binlog_format=STATEMENT;
+show global variables like "binlog_format%";
+Variable_name Value
+binlog_format STATEMENT
+set global binlog_format=ROW;
show global variables like "binlog_format%";
Variable_name Value
binlog_format ROW
@@ -68,11 +89,11 @@ execute stmt1 using @string;
deallocate prepare stmt1;
insert into t1 values("for_10_");
insert into t1 select "yesterday_11_";
-set binlog_format=default;
+set binlog_format=statement;
select @@global.binlog_format, @@session.binlog_format;
@@global.binlog_format @@session.binlog_format
STATEMENT STATEMENT
-set global binlog_format=default;
+set global binlog_format=statement;
select @@global.binlog_format, @@session.binlog_format;
@@global.binlog_format @@session.binlog_format
STATEMENT STATEMENT
@@ -87,11 +108,11 @@ execute stmt1 using @string;
deallocate prepare stmt1;
insert into t1 values("for_15_");
insert into t1 select "yesterday_16_";
-set binlog_format=mixed;
+set global binlog_format=mixed;
select @@global.binlog_format, @@session.binlog_format;
@@global.binlog_format @@session.binlog_format
-STATEMENT MIXED
-set global binlog_format=mixed;
+MIXED STATEMENT
+set binlog_format=default;
select @@global.binlog_format, @@session.binlog_format;
@@global.binlog_format @@session.binlog_format
MIXED MIXED
@@ -112,11 +133,15 @@ execute stmt1 using @string;
deallocate prepare stmt1;
insert into t1 values(concat("for_23_",UUID()));
insert into t1 select "yesterday_24_";
-create table t2 select rpad(UUID(),100,' ');
+create table t2 ENGINE=MyISAM select rpad(UUID(),100,' ');
create table t3 select 1 union select UUID();
+SET STATEMENT sql_mode = 'NO_ENGINE_SUBSTITUTION' FOR
create table t4 select * from t1 where 3 in (select 1 union select 2 union select UUID() union select 3);
+SET STATEMENT sql_mode = 'NO_ENGINE_SUBSTITUTION' FOR
create table t5 select * from t1 where 3 in (select 1 union select 2 union select curdate() union select 3);
-insert into t5 select UUID() from t1 where 3 in (select 1 union select 2 union select 3 union select * from t4);
+Warnings:
+Warning 1292 Truncated incorrect datetime value: '3'
+insert ignore into t5 select UUID() from t1 where 3 in (select 1 union select 2 union select 3 union select * from t4);
create procedure foo()
begin
insert into t1 values("work_25_");
@@ -228,6 +253,14 @@ begin
set NEW.data = concat(NEW.data,UUID());
end|
insert into t11 values("try_560_");
+insert delayed into t2 values("delay_1_");
+insert delayed into t2 values(concat("delay_2_",UUID()));
+insert delayed into t2 values("delay_6_");
+insert delayed into t2 values(rand());
+set @a=2.345;
+insert delayed into t2 values(@a);
+connection slave;
+connection master;
create table t20 select * from t1;
create table t21 select * from t2;
create table t22 select * from t3;
@@ -244,14 +277,18 @@ end|
select f("try_41_");
f("try_41_")
1
+connection slave;
use mysqltest1;
insert into t2 values(2,null),(3,null),(4,null);
delete from t2 where a>=2;
+connection master;
select f("try_42_");
f("try_42_")
1
+connection slave;
insert into t2 values(3,null),(4,null);
delete from t2 where a>=3;
+connection master;
prepare stmt1 from 'select f(?)';
set @string="try_43_";
insert into t1 values(null,"try_44_");
@@ -259,6 +296,8 @@ execute stmt1 using @string;
f(?)
1
deallocate prepare stmt1;
+connection slave;
+connection master;
create table t12 select * from t1;
drop table t1;
create table t1 (a int, b varchar(100), key(a));
@@ -284,12 +323,16 @@ end|
select f1("try_46_"),f2("try_47_");
f1("try_46_") f2("try_47_")
1 1
+connection slave;
insert into t2 values(2,null),(3,null),(4,null);
delete from t2 where a>=2;
+connection master;
select f1("try_48_"),f2("try_49_");
f1("try_48_") f2("try_49_")
1 1
insert into t3 values(concat("try_50_",f1("try_51_"),f2("try_52_")));
+connection slave;
+connection master;
drop function f2;
create function f2 (x varchar(100)) returns int deterministic
begin
@@ -301,6 +344,8 @@ end|
select f1("try_53_"),f2("try_54_");
f1("try_53_") f2("try_54_")
1 3
+connection slave;
+connection master;
drop function f2;
create trigger t1_bi before insert on t1 for each row
begin
@@ -309,12 +354,16 @@ end|
insert into t1 values(null,"try_56_");
alter table t1 modify a int, drop primary key;
insert into t1 values(null,"try_57_");
+connection slave;
+connection master;
CREATE TEMPORARY TABLE t15 SELECT UUID();
create table t16 like t15;
INSERT INTO t16 SELECT * FROM t15;
insert into t16 values("try_65_");
drop table t15;
insert into t16 values("try_66_");
+connection slave;
+connection master;
select count(*) from t1;
count(*)
7
@@ -338,7 +387,7 @@ count(*)
66
select count(*) from t21;
count(*)
-14
+19
select count(*) from t22;
count(*)
2
@@ -354,6 +403,8 @@ count(*)
select count(*) from t16;
count(*)
3
+connection slave;
+connection master;
DROP TABLE IF EXISTS t11;
SET SESSION BINLOG_FORMAT=STATEMENT;
CREATE TABLE t11 (song VARCHAR(255));
@@ -366,17 +417,44 @@ UNLOCK TABLES;
SELECT * FROM t11;
song Several Species of Small Furry Animals Gathered Together in a Cave and Grooving With a Pict
song Careful With That Axe, Eugene
+connection slave;
USE mysqltest1;
SELECT * FROM t11;
song Several Species of Small Furry Animals Gathered Together in a Cave and Grooving With a Pict
song Careful With That Axe, Eugene
+connection master;
DROP TABLE IF EXISTS t12;
SET SESSION BINLOG_FORMAT=MIXED;
CREATE TABLE t12 (data LONG);
LOCK TABLES t12 WRITE;
INSERT INTO t12 VALUES(UUID());
UNLOCK TABLES;
-show binlog events;
-show binlog events;
+connection slave;
+connection master;
+CREATE FUNCTION my_user()
+RETURNS CHAR(64)
+BEGIN
+DECLARE user CHAR(64);
+SELECT USER() INTO user;
+RETURN user;
+END $$
+CREATE FUNCTION my_current_user()
+RETURNS CHAR(64)
+BEGIN
+DECLARE user CHAR(64);
+SELECT CURRENT_USER() INTO user;
+RETURN user;
+END $$
+DROP TABLE IF EXISTS t13;
+CREATE TABLE t13 (data CHAR(64));
+INSERT INTO t13 VALUES (USER());
+INSERT INTO t13 VALUES (my_user());
+INSERT INTO t13 VALUES (CURRENT_USER());
+INSERT INTO t13 VALUES (my_current_user());
+connection slave;
+connection master;
drop database mysqltest1;
-set global binlog_format= @saved_binlog_format;
+connection slave;
+connection master;
+set global binlog_format =@my_binlog_format;
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/r/rpl_temp_table.result b/mysql-test/suite/engines/funcs/r/rpl_temp_table.result
index e4ca31d8908..ff3e52f77b5 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_temp_table.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_temp_table.result
@@ -1,9 +1,5 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
+include/master-slave.inc
+[connection master]
create table t2 (n int, PRIMARY KEY(n));
create temporary table t1 (n int);
create temporary table t3 (n int not null);
@@ -15,10 +11,16 @@ insert into t3 values (1010);
insert into t2 select * from t3;
drop table if exists t3;
insert into t2 values (1012);
+connection master1;
create temporary table t1 (n int);
insert into t1 values (4),(5);
insert into t2 select * from t1;
+disconnect master;
+connection slave;
+connection master1;
insert into t2 values(61);
+disconnect master1;
+connection slave;
select * from t2;
n
1
@@ -43,6 +45,19 @@ sum(n)
show status like 'Slave_open_temp_tables';
Variable_name Value
Slave_open_temp_tables 0
+*** MDEV-8016: Replication aborts on DROP /*!40005 TEMPORARY */ TABLE IF EXISTS ***
+connect master2,localhost,root,,;
+INSERT INTO t2 VALUES (2000), (2001);
+CREATE FUNCTION f() RETURNS INTEGER RETURN 1;
+CREATE TEMPORARY TABLE t3 AS SELECT f() AS col FROM t2;
+disconnect master2;
+connection default;
+connection slave;
+connect master2,localhost,root,,;
+connection master2;
drop table if exists t1,t2;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'test.t1'
+drop function f;
+connection slave;
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/r/rpl_temporary.result b/mysql-test/suite/engines/funcs/r/rpl_temporary.result
index 27def773a30..e2999cdd225 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_temporary.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_temporary.result
@@ -1,11 +1,43 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
+SET sql_log_bin = 0;
+SET sql_log_bin = 1;
+include/master-slave.inc
+[connection master]
+connection slave;
reset master;
-CREATE USER ''@localhost;
+connection master;
+DROP TABLE IF EXISTS t1;
+CREATE TEMPORARY TABLE t1 (a char(1));
+INSERT INTO t1 VALUES ('a');
+connection slave;
+include/stop_slave.inc
+include/start_slave.inc
+connection master;
+INSERT INTO t1 VALUES ('b');
+connection slave;
+connection master;
+DROP TABLE IF EXISTS t1;
+CREATE TEMPORARY TABLE `t1`(`a` tinyint,`b` char(1))engine=myisam;
+INSERT IGNORE INTO `t1` set `a`=128,`b`='128';
+Warnings:
+Warning 1264 Out of range value for column 'a' at row 1
+Warning 1265 Data truncated for column 'b' at row 1
+connection slave;
+include/stop_slave.inc
+include/start_slave.inc
+connection master;
+INSERT IGNORE INTO `t1` set `a`=128,`b`='128';
+Warnings:
+Warning 1264 Out of range value for column 'a' at row 1
+Warning 1265 Data truncated for column 'b' at row 1
+connection slave;
+connection master;
+DROP TABLE t1;
+connection slave;
+connection master;
+connect con1,localhost,root,,;
+connect con2,localhost,root,,;
+connect con3,localhost,zedjzlcsjhd,,;
+connection con3;
SET @save_select_limit=@@session.sql_select_limit;
SET @@session.sql_select_limit=10, @@session.pseudo_thread_id=100;
ERROR 42000: Access denied; you need (at least one of) the SUPER privilege(s) for this operation
@@ -17,20 +49,33 @@ ERROR 42000: Access denied; you need (at least one of) the SUPER privilege(s) fo
SELECT @@session.sql_select_limit = @save_select_limit;
@@session.sql_select_limit = @save_select_limit
1
+connection con2;
+SET @save_conn_id= connection_id();
SET @@session.pseudo_thread_id=100;
SET @@session.pseudo_thread_id=connection_id();
+SET @@session.pseudo_thread_id=@save_conn_id;
SET @@session.sql_log_bin=0;
SET @@session.sql_log_bin=1;
+connection con3;
drop table if exists t1,t2;
create table t1(f int);
create table t2(f int);
insert into t1 values (1),(2),(3),(4),(5),(6),(7),(8),(9),(10);
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+10
+connection con1;
create temporary table t3(f int);
insert into t3 select * from t1 where f<6;
+connection con2;
create temporary table t3(f int);
+connection con1;
insert into t2 select count(*) from t3;
+connection con2;
insert into t3 select * from t1 where f>=4;
+connection con1;
drop temporary table t3;
+connection con2;
insert into t2 select count(*) from t3;
drop temporary table t3;
select * from t2 ORDER BY f;
@@ -75,15 +120,22 @@ f
7
drop table t1,t2;
create temporary table t3 (f int);
+connection slave;
+connection master;
create temporary table t4 (f int);
create table t5 (f int);
-select id from information_schema.processlist where command='Binlog Dump' into @id;
-kill @id;
+connection slave;
+stop slave;
+connection master;
insert into t5 select * from t4;
+connection slave;
+start slave;
select * from t5 /* must be 1 after reconnection */;
f
+connection master;
drop temporary table t4;
drop table t5;
+connection con1;
set @@session.pseudo_thread_id=100;
create temporary table t101 (id int);
create temporary table t102 (id int);
@@ -95,14 +147,99 @@ set @@session.pseudo_thread_id=300;
create temporary table t301 (id int);
create temporary table t302 (id int);
create temporary table `#sql_not_user_table303` (id int);
-DROP USER ''@localhost;
+disconnect con1;
+connection master;
create table t1(f int);
insert into t1 values (1);
+connection slave;
select * from t1 /* must be 1 */;
f
1
+connection master;
drop table t1;
+connection slave;
select * from t1;
a
1
+connection master;
drop table t1;
+connection slave;
+include/stop_slave.inc
+connection master;
+include/rpl_reset.inc
+-- Bug#43748
+-- make a user on the slave that can list but not kill system threads.
+connection slave;
+FLUSH PRIVILEGES;
+GRANT USAGE ON *.* TO user43748@127.0.0.1 IDENTIFIED BY 'meow';
+GRANT PROCESS ON *.* TO user43748@127.0.0.1;
+-- try to KILL system-thread as that non-privileged user (on slave).
+connect cont43748,127.0.0.1,user43748,meow,test,$SLAVE_MYPORT,;
+connection cont43748;
+SELECT id INTO @id FROM information_schema.processlist WHERE user='system user' LIMIT 1;
+KILL @id;
+Got one of the listed errors
+disconnect cont43748;
+-- throw out test-user on slave.
+connection slave;
+DROP USER user43748@127.0.0.1;
+#
+# MDEV-10216: Assertion `strcmp(share->unique_file_name,filename) ||
+# share->last_version' failed in myisam/mi_open.c:67: test_if_reopen
+#
+connection master;
+CREATE TEMPORARY TABLE t1(i INT PRIMARY KEY) ENGINE=MYISAM;
+INSERT INTO t1 VALUES(1);
+SELECT COUNT(*)=1 FROM t1;
+COUNT(*)=1
+1
+ALTER TABLE t1 RENAME t2;
+SELECT COUNT(*)=1 FROM t2;
+COUNT(*)=1
+1
+ALTER TABLE t2 RENAME t1;
+ALTER TABLE t1 DISABLE KEYS;
+ALTER TABLE t1 ENABLE KEYS;
+LOCK TABLES t1 WRITE;
+ALTER TABLE t1 RENAME t2;
+SELECT COUNT(*)=1 FROM t2;
+COUNT(*)=1
+1
+ALTER TABLE t2 RENAME t1;
+ALTER TABLE t1 DISABLE KEYS;
+ALTER TABLE t1 ENABLE KEYS;
+UNLOCK TABLES;
+LOCK TABLES t1 READ;
+ALTER TABLE t1 RENAME t2;
+SELECT COUNT(*)=1 FROM t2;
+COUNT(*)=1
+1
+ALTER TABLE t2 RENAME t1;
+ALTER TABLE t1 DISABLE KEYS;
+ALTER TABLE t1 ENABLE KEYS;
+UNLOCK TABLES;
+FLUSH TABLES WITH READ LOCK;
+ALTER TABLE t1 RENAME t2;
+SELECT COUNT(*)=1 FROM t2;
+COUNT(*)=1
+1
+ALTER TABLE t2 RENAME t1;
+ALTER TABLE t1 DISABLE KEYS;
+ALTER TABLE t1 ENABLE KEYS;
+UNLOCK TABLES;
+ALTER TABLE t1 RENAME t2, LOCK SHARED;
+ALTER TABLE t2 RENAME t1, LOCK EXCLUSIVE;
+DROP TABLE t1;
+#
+# MDEV-10320: NO-OP ALTER TABLE on temporary tables getting logged
+# under row binlog format
+#
+connection master;
+CREATE TEMPORARY TABLE t1(i INT PRIMARY KEY) ENGINE=MYISAM;
+ALTER TABLE t1;
+ALTER TABLE t1 ADD COLUMN IF NOT EXISTS I INT;
+Warnings:
+Note 1060 Duplicate column name 'I'
+DROP TABLE t1;
+End of 5.1 tests
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/r/rpl_trigger.result b/mysql-test/suite/engines/funcs/r/rpl_trigger.result
index 7c7cda1f581..b5b88670fc6 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_trigger.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_trigger.result
@@ -1,12 +1,6 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
-DROP TABLE IF EXISTS t1;
-DROP TABLE IF EXISTS t2;
-DROP TABLE IF EXISTS t3;
+include/master-slave.inc
+[connection master]
+call mtr.add_suppression("Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT");
create table t1 (a int auto_increment, primary key (a), b int, rand_value double not null);
create table t2 (a int auto_increment, primary key (a), b int);
create table t3 (a int auto_increment, primary key (a), name varchar(64) not null, old_a int, old_b int, rand_value double not null);
@@ -22,8 +16,10 @@ insert into t3 values(100,"log",0,0,0);
SET @@RAND_SEED1=658490765, @@RAND_SEED2=635893186;
insert into t1 values(1,1,rand()),(NULL,2,rand());
insert into t2 (b) values(last_insert_id());
-insert into t2 values(3,0),(NULL,0);
-insert into t2 values(NULL,0),(500,0);
+insert into t2 values(3,0);
+insert into t2 values(NULL,0);
+insert into t2 values(NULL,0);
+insert into t2 values(500,0);
select a,b, truncate(rand_value,4) from t1;
a b truncate(rand_value,4)
1 1 0.4320
@@ -39,14 +35,13 @@ select a,name, old_a, old_b, truncate(rand_value,4) from t3;
a name old_a old_b truncate(rand_value,4)
100 log 0 0 0.0000
101 t1 1 1 0.3203
-102 t1 0 2 0.5666
+102 t1 NULL 2 0.5666
103 t2 1 2 0.9164
104 t2 3 0 0.8826
105 t2 4 0 0.6635
106 t2 5 0 0.6699
107 t2 500 0 0.3593
-
---- On slave --
+connection slave;
select a,b, truncate(rand_value,4) from t1;
a b truncate(rand_value,4)
1 1 0.4320
@@ -62,16 +57,20 @@ select a,name, old_a, old_b, truncate(rand_value,4) from t3;
a name old_a old_b truncate(rand_value,4)
100 log 0 0 0.0000
101 t1 1 1 0.3203
-102 t1 0 2 0.5666
+102 t1 NULL 2 0.5666
103 t2 1 2 0.9164
104 t2 3 0 0.8826
105 t2 4 0 0.6635
106 t2 5 0 0.6699
107 t2 500 0 0.3593
+connection master;
drop table t1,t2,t3;
+connect con2,localhost,root,,;
+connection con2;
select get_lock("bug12480",2);
get_lock("bug12480",2)
1
+connection default;
create table t1 (a datetime,b datetime, c datetime);
drop function if exists bug12480;
create function bug12480() returns datetime
@@ -91,40 +90,33 @@ select a=b && a=c from t1;
a=b && a=c
1
SELECT routine_name, definer
-FROM information_schema.routines;
+FROM information_schema.routines
+WHERE routine_name = 'bug12480';
routine_name definer
-add_suppression root@localhost
-check_testcase root@localhost
-check_warnings root@localhost
-force_restart root@localhost
bug12480 root@localhost
SELECT trigger_name, definer
-FROM information_schema.triggers;
+FROM information_schema.triggers
+WHERE trigger_name = 't1_first';
trigger_name definer
-gs_insert root@localhost
-ts_insert root@localhost
t1_first root@localhost
-
---- On slave --
+connection slave;
SELECT routine_name, definer
-FROM information_schema.routines;
+FROM information_schema.routines
+WHERE routine_name = 'bug12480';
routine_name definer
-add_suppression root@localhost
-check_testcase root@localhost
-check_warnings root@localhost
-force_restart root@localhost
bug12480 root@localhost
SELECT trigger_name, definer
-FROM information_schema.triggers;
+FROM information_schema.triggers
+WHERE trigger_name = 't1_first';
trigger_name definer
-gs_insert root@localhost
-ts_insert root@localhost
t1_first root@localhost
select a=b && a=c from t1;
a=b && a=c
1
test
1
+connection master;
+disconnect con2;
truncate table t1;
drop trigger t1_first;
insert into t1 values ("2003-03-03","2003-03-03","2003-03-03"),(bug12480(),bug12480(),bug12480()),(now(),now(),now());
@@ -144,6 +136,8 @@ end|
create database other;
use other;
insert into test.t1 values (1);
+connection slave;
+connection master;
use test;
drop table t1,t2;
drop database other;
@@ -151,14 +145,18 @@ test case for BUG#13227
-------------------
10
-------------------
+connection master;
drop table if exists t110;
+connection slave;
drop table if exists t210,t310;
+connection master;
create table t110 (f1 int) /* 2 replicate */;
insert into t110 values (-5);
insert into t110 values (-4);
insert into t110 values (-3);
insert into t110 values (-2);
insert into t110 values (-1);
+connection slave;
select * from t110;
f1
-5
@@ -198,11 +196,13 @@ f1
SELECT * from t310 /* must be f3 5*100 */;
f3
500
+connection master;
UPDATE t110 SET f1=5 where f1=-5;
UPDATE t110 SET f1=4 where f1=-4;
UPDATE t110 SET f1=3 where f1=-3;
UPDATE t110 SET f1=2 where f1=-2;
UPDATE t110 SET f1=1 where f1=-1;
+connection slave;
SELECT * from t110 /* must be f1 5 ... 1 */;
f1
5
@@ -219,18 +219,25 @@ f3
100
drop trigger trg110;
drop table t210,t310;
+connection master;
drop table t110;
+connection slave;
+connection master;
-------------------
9
-------------------
+connection master;
drop table if exists t19;
+connection slave;
drop table if exists t29,t39;
+connection master;
create table t19 (f1 int) /* 2 replicate */;
insert into t19 values (-5);
insert into t19 values (-4);
insert into t19 values (-3);
insert into t19 values (-2);
insert into t19 values (-1);
+connection slave;
select * from t19;
f1
-5
@@ -270,11 +277,13 @@ f1
SELECT * from t39 /* must be f3 5*100 */;
f3
500
+connection master;
UPDATE t19 SET f1=5 where f1=-5;
UPDATE t19 SET f1=4 where f1=-4;
UPDATE t19 SET f1=3 where f1=-3;
UPDATE t19 SET f1=2 where f1=-2;
UPDATE t19 SET f1=1 where f1=-1;
+connection slave;
SELECT * from t19 /* must be f1 5 ... 1 */;
f1
5
@@ -291,18 +300,25 @@ f3
100
drop trigger trg19;
drop table t29,t39;
+connection master;
drop table t19;
+connection slave;
+connection master;
-------------------
8
-------------------
+connection master;
drop table if exists t18;
+connection slave;
drop table if exists t28,t38;
+connection master;
create table t18 (f1 int) /* 2 replicate */;
insert into t18 values (-5);
insert into t18 values (-4);
insert into t18 values (-3);
insert into t18 values (-2);
insert into t18 values (-1);
+connection slave;
select * from t18;
f1
-5
@@ -342,11 +358,13 @@ f1
SELECT * from t38 /* must be f3 5*100 */;
f3
500
+connection master;
UPDATE t18 SET f1=5 where f1=-5;
UPDATE t18 SET f1=4 where f1=-4;
UPDATE t18 SET f1=3 where f1=-3;
UPDATE t18 SET f1=2 where f1=-2;
UPDATE t18 SET f1=1 where f1=-1;
+connection slave;
SELECT * from t18 /* must be f1 5 ... 1 */;
f1
5
@@ -363,18 +381,25 @@ f3
100
drop trigger trg18;
drop table t28,t38;
+connection master;
drop table t18;
+connection slave;
+connection master;
-------------------
7
-------------------
+connection master;
drop table if exists t17;
+connection slave;
drop table if exists t27,t37;
+connection master;
create table t17 (f1 int) /* 2 replicate */;
insert into t17 values (-5);
insert into t17 values (-4);
insert into t17 values (-3);
insert into t17 values (-2);
insert into t17 values (-1);
+connection slave;
select * from t17;
f1
-5
@@ -414,11 +439,13 @@ f1
SELECT * from t37 /* must be f3 5*100 */;
f3
500
+connection master;
UPDATE t17 SET f1=5 where f1=-5;
UPDATE t17 SET f1=4 where f1=-4;
UPDATE t17 SET f1=3 where f1=-3;
UPDATE t17 SET f1=2 where f1=-2;
UPDATE t17 SET f1=1 where f1=-1;
+connection slave;
SELECT * from t17 /* must be f1 5 ... 1 */;
f1
5
@@ -435,18 +462,25 @@ f3
100
drop trigger trg17;
drop table t27,t37;
+connection master;
drop table t17;
+connection slave;
+connection master;
-------------------
6
-------------------
+connection master;
drop table if exists t16;
+connection slave;
drop table if exists t26,t36;
+connection master;
create table t16 (f1 int) /* 2 replicate */;
insert into t16 values (-5);
insert into t16 values (-4);
insert into t16 values (-3);
insert into t16 values (-2);
insert into t16 values (-1);
+connection slave;
select * from t16;
f1
-5
@@ -486,11 +520,13 @@ f1
SELECT * from t36 /* must be f3 5*100 */;
f3
500
+connection master;
UPDATE t16 SET f1=5 where f1=-5;
UPDATE t16 SET f1=4 where f1=-4;
UPDATE t16 SET f1=3 where f1=-3;
UPDATE t16 SET f1=2 where f1=-2;
UPDATE t16 SET f1=1 where f1=-1;
+connection slave;
SELECT * from t16 /* must be f1 5 ... 1 */;
f1
5
@@ -507,18 +543,25 @@ f3
100
drop trigger trg16;
drop table t26,t36;
+connection master;
drop table t16;
+connection slave;
+connection master;
-------------------
5
-------------------
+connection master;
drop table if exists t15;
+connection slave;
drop table if exists t25,t35;
+connection master;
create table t15 (f1 int) /* 2 replicate */;
insert into t15 values (-5);
insert into t15 values (-4);
insert into t15 values (-3);
insert into t15 values (-2);
insert into t15 values (-1);
+connection slave;
select * from t15;
f1
-5
@@ -558,11 +601,13 @@ f1
SELECT * from t35 /* must be f3 5*100 */;
f3
500
+connection master;
UPDATE t15 SET f1=5 where f1=-5;
UPDATE t15 SET f1=4 where f1=-4;
UPDATE t15 SET f1=3 where f1=-3;
UPDATE t15 SET f1=2 where f1=-2;
UPDATE t15 SET f1=1 where f1=-1;
+connection slave;
SELECT * from t15 /* must be f1 5 ... 1 */;
f1
5
@@ -579,18 +624,25 @@ f3
100
drop trigger trg15;
drop table t25,t35;
+connection master;
drop table t15;
+connection slave;
+connection master;
-------------------
4
-------------------
+connection master;
drop table if exists t14;
+connection slave;
drop table if exists t24,t34;
+connection master;
create table t14 (f1 int) /* 2 replicate */;
insert into t14 values (-5);
insert into t14 values (-4);
insert into t14 values (-3);
insert into t14 values (-2);
insert into t14 values (-1);
+connection slave;
select * from t14;
f1
-5
@@ -630,11 +682,13 @@ f1
SELECT * from t34 /* must be f3 5*100 */;
f3
500
+connection master;
UPDATE t14 SET f1=5 where f1=-5;
UPDATE t14 SET f1=4 where f1=-4;
UPDATE t14 SET f1=3 where f1=-3;
UPDATE t14 SET f1=2 where f1=-2;
UPDATE t14 SET f1=1 where f1=-1;
+connection slave;
SELECT * from t14 /* must be f1 5 ... 1 */;
f1
5
@@ -651,18 +705,25 @@ f3
100
drop trigger trg14;
drop table t24,t34;
+connection master;
drop table t14;
+connection slave;
+connection master;
-------------------
3
-------------------
+connection master;
drop table if exists t13;
+connection slave;
drop table if exists t23,t33;
+connection master;
create table t13 (f1 int) /* 2 replicate */;
insert into t13 values (-5);
insert into t13 values (-4);
insert into t13 values (-3);
insert into t13 values (-2);
insert into t13 values (-1);
+connection slave;
select * from t13;
f1
-5
@@ -702,11 +763,13 @@ f1
SELECT * from t33 /* must be f3 5*100 */;
f3
500
+connection master;
UPDATE t13 SET f1=5 where f1=-5;
UPDATE t13 SET f1=4 where f1=-4;
UPDATE t13 SET f1=3 where f1=-3;
UPDATE t13 SET f1=2 where f1=-2;
UPDATE t13 SET f1=1 where f1=-1;
+connection slave;
SELECT * from t13 /* must be f1 5 ... 1 */;
f1
5
@@ -723,18 +786,25 @@ f3
100
drop trigger trg13;
drop table t23,t33;
+connection master;
drop table t13;
+connection slave;
+connection master;
-------------------
2
-------------------
+connection master;
drop table if exists t12;
+connection slave;
drop table if exists t22,t32;
+connection master;
create table t12 (f1 int) /* 2 replicate */;
insert into t12 values (-5);
insert into t12 values (-4);
insert into t12 values (-3);
insert into t12 values (-2);
insert into t12 values (-1);
+connection slave;
select * from t12;
f1
-5
@@ -774,11 +844,13 @@ f1
SELECT * from t32 /* must be f3 5*100 */;
f3
500
+connection master;
UPDATE t12 SET f1=5 where f1=-5;
UPDATE t12 SET f1=4 where f1=-4;
UPDATE t12 SET f1=3 where f1=-3;
UPDATE t12 SET f1=2 where f1=-2;
UPDATE t12 SET f1=1 where f1=-1;
+connection slave;
SELECT * from t12 /* must be f1 5 ... 1 */;
f1
5
@@ -795,18 +867,25 @@ f3
100
drop trigger trg12;
drop table t22,t32;
+connection master;
drop table t12;
+connection slave;
+connection master;
-------------------
1
-------------------
+connection master;
drop table if exists t11;
+connection slave;
drop table if exists t21,t31;
+connection master;
create table t11 (f1 int) /* 2 replicate */;
insert into t11 values (-5);
insert into t11 values (-4);
insert into t11 values (-3);
insert into t11 values (-2);
insert into t11 values (-1);
+connection slave;
select * from t11;
f1
-5
@@ -846,11 +925,13 @@ f1
SELECT * from t31 /* must be f3 5*100 */;
f3
500
+connection master;
UPDATE t11 SET f1=5 where f1=-5;
UPDATE t11 SET f1=4 where f1=-4;
UPDATE t11 SET f1=3 where f1=-3;
UPDATE t11 SET f1=2 where f1=-2;
UPDATE t11 SET f1=1 where f1=-1;
+connection slave;
SELECT * from t11 /* must be f1 5 ... 1 */;
f1
5
@@ -867,11 +948,20 @@ f3
100
drop trigger trg11;
drop table t21,t31;
+connection master;
drop table t11;
-STOP SLAVE;
+connection slave;
+connection master;
+connection slave;
+include/stop_slave.inc
+connection master;
FLUSH LOGS;
+include/rpl_stop_server.inc [server_number=1]
+include/rpl_start_server.inc [server_number=1]
+--> Master binlog: Server ver: 5.0.16-debug-log, Binlog ver: 4
+connection slave;
RESET SLAVE;
-START SLAVE;
+include/start_slave.inc
SELECT MASTER_POS_WAIT('master-bin.000001', 513) >= 0;
MASTER_POS_WAIT('master-bin.000001', 513) >= 0
1
@@ -881,7 +971,7 @@ t1
t2
SHOW TRIGGERS;
Trigger Event Table Statement Timing Created sql_mode Definer character_set_client collation_connection Database Collation
-trg1 INSERT t1 INSERT INTO t2 VALUES(CURRENT_USER()) AFTER NULL latin1 latin1_swedish_ci latin1_swedish_ci
+trg1 INSERT t1 INSERT INTO t2 VALUES(CURRENT_USER()) AFTER # latin1 latin1_swedish_ci latin1_swedish_ci
SELECT * FROM t1;
c
1
@@ -900,25 +990,28 @@ root@localhost
DROP TRIGGER trg1;
DROP TABLE t1;
DROP TABLE t2;
-STOP SLAVE;
+include/stop_slave.inc
RESET SLAVE;
+connection master;
SHOW TABLES LIKE 't_';
Tables_in_test (t_)
SHOW TRIGGERS;
Trigger Event Table Statement Timing Created sql_mode Definer character_set_client collation_connection Database Collation
RESET MASTER;
-START SLAVE;
+connection slave;
+include/start_slave.inc
---> Test for BUG#20438
---> Preparing environment...
----> connection: master
+connection master;
DROP TABLE IF EXISTS t1;
DROP TABLE IF EXISTS t2;
---> Synchronizing slave with master...
+connection slave;
----> connection: master
+connection master;
---> Creating objects...
CREATE TABLE t1(c INT);
@@ -939,7 +1032,7 @@ c
10
---> Synchronizing slave with master...
----> connection: master
+connection slave;
---> Checking on slave...
SELECT * FROM t1;
@@ -949,11 +1042,14 @@ SELECT * FROM t2;
c
10
----> connection: master
+connection master;
---> Cleaning up...
DROP TABLE t1;
DROP TABLE t2;
+connection slave;
+connection master;
+connection master;
drop table if exists t1;
create table t1(a int, b varchar(50));
drop trigger not_a_trigger;
@@ -975,11 +1071,15 @@ a b
1 In trigger t1_bi
2 b
3 c
+connection slave;
select * from t1;
a b
1 In trigger t1_bi
2 b
3 c
+connection master;
drop table if exists t1,t11;
Warnings:
-Note 1051 Unknown table 't11'
+Note 1051 Unknown table 'test.t11'
+connection slave;
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/r/rpl_trunc_temp.result b/mysql-test/suite/engines/funcs/r/rpl_trunc_temp.result
index 44624a38875..35cd91d795b 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_trunc_temp.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_trunc_temp.result
@@ -1,22 +1,26 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
+include/master-slave.inc
+[connection master]
create temporary table t1 (n int);
insert into t1 values(1);
+connection slave;
show status like 'Slave_open_temp_tables';
Variable_name Value
Slave_open_temp_tables 1
+connection master;
delete from t1;
+connection slave;
show status like 'Slave_open_temp_tables';
Variable_name Value
Slave_open_temp_tables 1
+connection master;
truncate t1;
+connection slave;
show status like 'Slave_open_temp_tables';
Variable_name Value
Slave_open_temp_tables 1
+disconnect master;
+connection slave;
show status like 'Slave_open_temp_tables';
Variable_name Value
Slave_open_temp_tables 0
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/r/rpl_user_variables.result b/mysql-test/suite/engines/funcs/r/rpl_user_variables.result
index ed0d2782394..0efdbff5522 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_user_variables.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_user_variables.result
@@ -1,10 +1,8 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
+include/master-slave.inc
+[connection master]
+connection slave;
reset master;
+connection master;
create table t1(n char(30));
set @i1:=12345678901234, @i2:=-12345678901234, @i3:=0, @i4:=-1;
set @s1:='This is a test', @r1:=12.5, @r2:=-12.5;
@@ -23,6 +21,7 @@ set @a:=5;
insert into t1 values (@a),(@a);
select * from t1 where n = '<nonexistant>';
n
+connection master1;
insert into t1 values (@a),(@a),(@a*5);
SELECT * FROM t1 ORDER BY n;
n
@@ -51,6 +50,7 @@ abcn1
abcn1n2
abc\def
This is a test
+connection slave;
SELECT * FROM t1 ORDER BY n;
n
NULL
@@ -78,6 +78,235 @@ abcn1
abcn1n2
abc\def
This is a test
+connection master;
insert into t1 select * FROM (select @var1 union select @var2) AS t2;
drop table t1;
-stop slave;
+End of 4.1 tests.
+DROP TABLE IF EXISTS t20;
+DROP TABLE IF EXISTS t21;
+DROP PROCEDURE IF EXISTS test.insert;
+CREATE TABLE t20 (a VARCHAR(20));
+CREATE TABLE t21 (a VARCHAR(20));
+CREATE PROCEDURE test.insert()
+BEGIN
+IF (@VAR)
+THEN
+INSERT INTO test.t20 VALUES ('SP_TRUE');
+ELSE
+INSERT INTO test.t20 VALUES ('SP_FALSE');
+END IF;
+END|
+CREATE TRIGGER test.insert_bi BEFORE INSERT
+ON test.t20 FOR EACH ROW
+BEGIN
+IF (@VAR)
+THEN
+INSERT INTO test.t21 VALUES ('TRIG_TRUE');
+ELSE
+INSERT INTO test.t21 VALUES ('TRIG_FALSE');
+END IF;
+END|
+connection slave;
+connection master;
+SET @VAR=0;
+CALL test.insert();
+SET @VAR=1;
+CALL test.insert();
+Check the tables for correct data
+SELECT * FROM t20;
+a
+SP_FALSE
+SP_TRUE
+SELECT * FROM t21;
+a
+TRIG_FALSE
+TRIG_TRUE
+connection slave;
+Check the tables for correct data and it matches master
+SELECT * FROM t20;
+a
+SP_FALSE
+SP_TRUE
+SELECT * FROM t21;
+a
+TRIG_FALSE
+TRIG_TRUE
+connection master;
+DROP TABLE t20;
+DROP TABLE t21;
+DROP PROCEDURE test.insert;
+DROP TABLE IF EXISTS t1;
+DROP FUNCTION IF EXISTS test.square;
+CREATE TABLE t1 (i INT);
+CREATE FUNCTION test.square() RETURNS INTEGER DETERMINISTIC RETURN
+(@var * @var);
+SET @var = 1;
+INSERT INTO t1 VALUES (square());
+SET @var = 2;
+INSERT INTO t1 VALUES (square());
+SET @var = 3;
+INSERT INTO t1 VALUES (square());
+SET @var = 4;
+INSERT INTO t1 VALUES (square());
+SET @var = 5;
+INSERT INTO t1 VALUES (square());
+Retrieve the values from the table
+SELECT * FROM t1;
+i
+1
+4
+9
+16
+25
+connection slave;
+Retrieve the values from the table and verify they are the same as on master
+SELECT * FROM t1;
+i
+1
+4
+9
+16
+25
+connection master;
+DROP TABLE t1;
+DROP FUNCTION test.square;
+DROP TABLE IF EXISTS t1;
+DROP FUNCTION IF EXISTS f1;
+DROP FUNCTION IF EXISTS f2;
+CREATE TABLE t1(a int);
+CREATE FUNCTION f1() returns int deterministic BEGIN
+return @a;
+END |
+CREATE FUNCTION f2() returns int deterministic BEGIN
+IF (@b > 0) then
+SET @c = (@a + @b);
+else
+SET @c = (@a - 1);
+END if;
+return @c;
+END |
+connection slave;
+connection master;
+SET @a=500;
+INSERT INTO t1 values(f1());
+SET @b = 125;
+SET @c = 1;
+INSERT INTO t1 values(f2());
+Retrieve the values from the table
+connection slave;
+connection master;
+SELECT * from t1;
+a
+500
+625
+connection slave;
+Check the tables for correct data and it matches master
+SELECT * from t1;
+a
+500
+625
+connection master;
+DROP TABLE t1;
+DROP FUNCTION f1;
+DROP FUNCTION f2;
+DROP TABLE IF EXISTS t1;
+DROP TABLE IF EXISTS t2;
+CREATE TABLE t1 (i int);
+CREATE TABLE t2 (k int);
+CREATE trigger t1_bi before INSERT on t1 for each row BEGIN
+INSERT INTO t2 values (@a);
+SET @a:=42;
+INSERT INTO t2 values (@a);
+END |
+connection slave;
+connection master;
+SET @a:=100;
+INSERT INTO t1 values (5);
+Check to see that data was inserted correctly in both tables
+SELECT * from t1;
+i
+5
+SELECT * from t2;
+k
+100
+42
+connection slave;
+Check the tables for correct data and it matches master
+SELECT * from t1;
+i
+5
+SELECT * from t2;
+k
+100
+42
+connection master;
+drop table t1, t2;
+connection master;
+create table t1(a int, b int);
+prepare s1 from 'insert into t1 values (@x:=@x+1, ?)';
+set @x=1;
+execute s1 using @x;
+select * from t1;
+a b
+2 1
+connection slave;
+connection slave;
+select * from t1;
+a b
+2 1
+connection master;
+drop table t1;
+connection master;
+create table t1(a int);
+insert into t1 values (1),(2);
+prepare s1 from 'insert into t1 select a from t1 limit ?';
+set @x='1.1';
+execute s1 using @x;
+select * from t1;
+a
+1
+2
+1
+connection slave;
+connection slave;
+select * from t1;
+a
+1
+2
+1
+connection master;
+drop table t1;
+End of 5.0 tests.
+DROP FUNCTION IF EXISTS f1;
+DROP FUNCTION IF EXISTS f2;
+CREATE TABLE t1 (i INT);
+CREATE FUNCTION f1() RETURNS INT RETURN @a;
+CREATE
+FUNCTION f2() RETURNS INT BEGIN
+INSERT INTO t1 VALUES (10 + @a);
+RETURN 0;
+END|
+connection slave;
+connection master;
+SET @a:=123;
+SELECT f1(), f2();
+f1() f2()
+123 0
+Check to see that data was inserted correctly
+INSERT INTO t1 VALUES(f1());
+SELECT * FROM t1;
+i
+133
+123
+connection slave;
+Check the table for correct data and it matches master
+SELECT * FROM t1;
+i
+133
+123
+connection master;
+DROP FUNCTION f1;
+DROP FUNCTION f2;
+DROP TABLE t1;
+connection slave;
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/r/rpl_variables.result b/mysql-test/suite/engines/funcs/r/rpl_variables.result
index 6a9af27179e..547c324fdf8 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_variables.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_variables.result
@@ -1,9 +1,5 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
+include/master-slave.inc
+[connection master]
set @my_slave_net_timeout =@@global.slave_net_timeout;
set @my_sql_slave_skip_counter =@@global.sql_slave_skip_counter;
set global slave_net_timeout=100;
@@ -19,3 +15,4 @@ Variable_name Value
slave_skip_errors 3,100,137,643,1752
set global slave_net_timeout=@my_slave_net_timeout;
set global sql_slave_skip_counter=@my_sql_slave_skip_counter;
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/r/rpl_view.result b/mysql-test/suite/engines/funcs/r/rpl_view.result
index 00d9bfbf380..68a149720b0 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_view.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_view.result
@@ -1,12 +1,10 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
+include/master-slave.inc
+[connection master]
drop table if exists t1,v1;
drop view if exists t1,v1;
+connection slave;
reset master;
+connection master;
create table t1 (a int);
insert into t1 values (1);
create view v1 as select a from t1;
@@ -15,45 +13,57 @@ select * from v1 order by a;
a
1
2
+connection slave;
select * from v1 order by a;
a
1
2
+connection master;
update v1 set a=3 where a=1;
select * from v1 order by a;
a
2
3
+connection slave;
select * from v1 order by a;
a
2
3
+connection master;
delete from v1 where a=2;
select * from v1 order by a;
a
3
+connection slave;
select * from v1 order by a;
a
3
+connection master;
alter view v1 as select a as b from t1;
+connection slave;
select * from v1 order by 1;
b
3
+connection master;
drop view v1;
+connection slave;
select * from v1 order by a;
ERROR 42S02: Table 'test.v1' doesn't exist
+connection master;
drop table t1;
+connection slave;
---> Test for BUG#20438
---> Preparing environment...
----> connection: master
+connection master;
DROP TABLE IF EXISTS t1;
DROP VIEW IF EXISTS v1;
---> Synchronizing slave with master...
+connection slave;
----> connection: master
+connection master;
---> Creating objects...
CREATE TABLE t1(c INT);
@@ -68,21 +78,22 @@ c
1
---> Synchronizing slave with master...
----> connection: master
-
----> Checking on slave...
+connection slave;
SELECT * FROM t1;
c
1
-
----> connection: master
+connection master;
---> Cleaning up...
DROP VIEW v1;
DROP TABLE t1;
+connection slave;
+connection master;
+connection master;
create table t1(a int, b int);
insert into t1 values (1, 1), (1, 2), (1, 3);
create view v1(a, b) as select a, sum(b) from t1 group by a;
+connection slave;
explain v1;
Field Type Null Key Default Extra
a int(11) YES NULL
@@ -93,6 +104,35 @@ v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VI
select * from v1;
a b
1 6
+connection master;
drop table t1;
drop view v1;
+connection slave;
+connection master;
+CREATE TABLE t1(a INT);
+CREATE VIEW v1 AS SELECT * FROM t1;
+CREATE VIEW v1 AS SELECT * FROM t1;
+ERROR 42S01: Table 'v1' already exists
+DROP VIEW v1;
+DROP TABLE t1;
+connection slave;
+connection master;
+CREATE TABLE t1 (a INT);
+# create view as output from mysqldump 10.11 (5.0.62)
+/*!50001 CREATE ALGORITHM=UNDEFINED */
+/*!50013 DEFINER=`root`@`localhost` SQL SECURITY DEFINER */
+/*!50001 VIEW `v1` AS select `t1`.`a` AS `a` from `t1` where (`t1`.`a` < 3) */
+/*!50002 WITH CASCADED CHECK OPTION */;
+SHOW CREATE VIEW v1;
+View Create View character_set_client collation_connection
+v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select `t1`.`a` AS `a` from `t1` where `t1`.`a` < 3 WITH CASCADED CHECK OPTION latin1 latin1_swedish_ci
+connection slave;
+SHOW CREATE VIEW v1;
+View Create View character_set_client collation_connection
+v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select `t1`.`a` AS `a` from `t1` where `t1`.`a` < 3 WITH CASCADED CHECK OPTION latin1 latin1_swedish_ci
+connection master;
+DROP VIEW v1;
+DROP TABLE t1;
+connection slave;
End of 5.0 tests
+include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/t/rpl000010-slave.opt b/mysql-test/suite/engines/funcs/t/rpl000010-slave.opt
deleted file mode 100644
index 0dbfb311e33..00000000000
--- a/mysql-test/suite/engines/funcs/t/rpl000010-slave.opt
+++ /dev/null
@@ -1 +0,0 @@
---disconnect-slave-event-count=2
diff --git a/mysql-test/suite/engines/funcs/t/rpl000010.test b/mysql-test/suite/engines/funcs/t/rpl000010.test
deleted file mode 100644
index 261b9148774..00000000000
--- a/mysql-test/suite/engines/funcs/t/rpl000010.test
+++ /dev/null
@@ -1,19 +0,0 @@
-# This tests the offset off by 22 mystery bug
-# Must run slave with --disconnect-slave-event-count=1 --master-connect-retry=1
-
-source include/master-slave.inc;
-
-create table t1 (n int not null auto_increment primary key);
-insert into t1 values(NULL);
-insert into t1 values(2);
-save_master_pos;
-connection slave;
-sync_with_master;
-select n from t1;
-connection master;
-drop table t1;
-save_master_pos;
-connection slave;
-sync_with_master;
-
-# End of 4.1 tests
diff --git a/mysql-test/suite/engines/funcs/t/rpl000011.test b/mysql-test/suite/engines/funcs/t/rpl000011.test
deleted file mode 100644
index 32f6227f7c5..00000000000
--- a/mysql-test/suite/engines/funcs/t/rpl000011.test
+++ /dev/null
@@ -1,17 +0,0 @@
-source include/master-slave.inc;
-
-create table t1 (n int);
-insert into t1 values(1);
-sync_slave_with_master;
-stop slave;
-start slave;
-connection master;
-insert into t1 values(2);
-#let slave catch up
-sync_slave_with_master;
-select * from t1;
-connection master;
-drop table t1;
-sync_slave_with_master;
-
-# End of 4.1 tests
diff --git a/mysql-test/suite/engines/funcs/t/rpl000013.test b/mysql-test/suite/engines/funcs/t/rpl000013.test
deleted file mode 100644
index 2c727107563..00000000000
--- a/mysql-test/suite/engines/funcs/t/rpl000013.test
+++ /dev/null
@@ -1,61 +0,0 @@
-# This test is to verify that DROP TEMPORARY TABLE
-# is automatically binlogged and sent to slave
-# when a temp table is dropped by disconnection
-# of a master's conection.
-# So it does not apply to row-based, where we neither need
-# nor do this automatic binlogging. And if we run this test
-# in row-based, it hangs waiting for an offset which is never
-# reached (the "sync_with_master 1"), logically.
-
---source include/have_binlog_format_mixed_or_statement.inc
-source include/master-slave.inc;
-save_master_pos;
-connection slave;
-sync_with_master;
-connection master;
-
---disable_query_log
-CALL mtr.add_suppression(" Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT");
---enable_query_log
-
-create table t2(n int);
-create temporary table t1 (n int);
-insert into t1 values(1),(2),(3);
---disable_warnings
-insert into t2 select * from t1;
---enable_warnings
-connection master1;
-create temporary table t1 (n int);
-insert into t1 values (4),(5);
---disable_warnings
-insert into t2 select * from t1 as t10;
---enable_warnings
-save_master_pos;
-disconnect master;
-connection slave;
-#add 1 to catch drop table
-sync_with_master 1;
-connection master1;
-insert into t2 values(6);
-save_master_pos;
-disconnect master1;
-connection slave;
-# same trick to go one more event
-sync_with_master 1;
-select * from t2;
-show status like 'Slave_open_temp_tables';
-#
-# Clean up
-#
-connect (master2,localhost,root,,);
-connection master2;
-
-# We will get a warning for t1 as this is a temporary table that doesn't
-# exist in this connection.
-
-drop table if exists t1,t2;
-save_master_pos;
-connection slave;
-sync_with_master;
-
-# End of 4.1 tests
diff --git a/mysql-test/suite/engines/funcs/t/rpl000017-slave.opt b/mysql-test/suite/engines/funcs/t/rpl000017-slave.opt
deleted file mode 100644
index 58a964c90d0..00000000000
--- a/mysql-test/suite/engines/funcs/t/rpl000017-slave.opt
+++ /dev/null
@@ -1 +0,0 @@
---skip-slave-start
diff --git a/mysql-test/suite/engines/funcs/t/rpl_000010.test b/mysql-test/suite/engines/funcs/t/rpl_000010.test
new file mode 100644
index 00000000000..155b54572e3
--- /dev/null
+++ b/mysql-test/suite/engines/funcs/t/rpl_000010.test
@@ -0,0 +1,2 @@
+--source suite/rpl/t/rpl_000010.test
+
diff --git a/mysql-test/suite/engines/funcs/t/rpl_000011.test b/mysql-test/suite/engines/funcs/t/rpl_000011.test
new file mode 100644
index 00000000000..f7fc0f4a310
--- /dev/null
+++ b/mysql-test/suite/engines/funcs/t/rpl_000011.test
@@ -0,0 +1,18 @@
+--source include/master-slave.inc
+
+create table t1 (n int);
+insert into t1 values(1);
+--sync_slave_with_master
+--source include/stop_slave.inc
+--source include/start_slave.inc
+connection master;
+insert into t1 values(2);
+#let slave catch up
+--sync_slave_with_master
+select * from t1;
+connection master;
+drop table t1;
+--sync_slave_with_master
+
+# End of 4.1 tests
+--source include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/t/rpl_000013.test b/mysql-test/suite/engines/funcs/t/rpl_000013.test
new file mode 100644
index 00000000000..c2f6c114ad6
--- /dev/null
+++ b/mysql-test/suite/engines/funcs/t/rpl_000013.test
@@ -0,0 +1,2 @@
+--source suite/rpl/t/rpl_000013.test
+
diff --git a/mysql-test/suite/engines/funcs/t/rpl_000015.test b/mysql-test/suite/engines/funcs/t/rpl_000015.test
index 817ed6f407c..8996affe667 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_000015.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_000015.test
@@ -3,49 +3,36 @@
# Change Date: 2006-01-17
# Change: added order by in select
#####################
-source include/master-slave.inc;
+--source include/master-slave.inc
reset master;
show master status;
save_master_pos;
connection slave;
-stop slave;
+--source include/stop_slave.inc
reset slave;
---vertical_results
---replace_result $MASTER_MYPORT MASTER_PORT
---replace_column 1 # 5 # 8 # 9 # 23 # 33 #
-show slave status;
+--let $status_items= Slave_IO_Running, Slave_SQL_Running, Last_SQL_Errno, Last_SQL_Error, Exec_Master_Log_Pos
+--source include/show_slave_status.inc
change master to master_host='127.0.0.1';
# The following needs to be cleaned up when change master is fixed
---vertical_results
---replace_result $MASTER_MYPORT MASTER_PORT
---replace_column 1 # 5 # 8 # 9 # 23 # 33 #
-show slave status;
+--source include/show_slave_status.inc
--replace_result $MASTER_MYPORT MASTER_PORT
-eval change master to master_host='127.0.0.1',master_user='root',
- master_password='',master_port=$MASTER_MYPORT;
---vertical_results
---replace_result $MASTER_MYPORT MASTER_PORT
---replace_column 1 # 5 # 8 # 9 # 23 # 33 #
-show slave status;
-start slave;
+eval change master to master_host='127.0.0.1',master_user='root',
+master_password='',master_port=$MASTER_MYPORT;
+--source include/start_slave.inc
sync_with_master;
---vertical_results
---replace_result $MASTER_MYPORT MASTER_PORT
---replace_column 1 # 5 # 8 # 9 # 23 # 33 #
---replace_column 33 #
-show slave status;
+--source include/show_slave_status.inc
+
connection master;
---disable_warnings
-drop table if exists t1;
---enable_warnings
+
create table t1 (n int, PRIMARY KEY(n));
insert into t1 values (10),(45),(90);
-sync_slave_with_master;
-connection slave;
+--sync_slave_with_master
+
SELECT * FROM t1 ORDER BY n;
connection master;
SELECT * FROM t1 ORDER BY n;
drop table t1;
-sync_slave_with_master;
+--sync_slave_with_master
+--source include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/t/rpl_000017.test b/mysql-test/suite/engines/funcs/t/rpl_000017.test
new file mode 100644
index 00000000000..f498ce10a58
--- /dev/null
+++ b/mysql-test/suite/engines/funcs/t/rpl_000017.test
@@ -0,0 +1,2 @@
+--source suite/rpl/t/rpl_000017.test
+
diff --git a/mysql-test/suite/engines/funcs/t/rpl_LD_INFILE.test b/mysql-test/suite/engines/funcs/t/rpl_LD_INFILE.test
index ae647ed6648..e896d62a0ab 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_LD_INFILE.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_LD_INFILE.test
@@ -1,38 +1,2 @@
-#############################################################################
-# Original Author: JBM #
-# Original Date: Aug/18/2005 #
-#############################################################################
-# TEST: To test the LOAD DATA INFILE in rbr #
-#############################################################################
-
-# Includes
--- source include/master-slave.inc
-
-# Begin clean up test section
---disable_warnings
-connection master;
-DROP TABLE IF EXISTS test.t1;
---enable_warnings
-
-# Section 1 test
-CREATE TABLE test.t1 (a VARCHAR(255), PRIMARY KEY(a));
-LOAD DATA INFILE '../../std_data/words2.dat' INTO TABLE test.t1;
-DELETE FROM test.t1 WHERE a = 'abashed';
-DELETE FROM test.t1;
-LOAD DATA INFILE '../../std_data/words2.dat' INTO TABLE test.t1;
-
-
-SELECT * FROM test.t1 ORDER BY a DESC;
-save_master_pos;
-sync_slave_with_master;
-connection slave;
-SELECT * FROM test.t1 ORDER BY a DESC;
-
-# Cleanup
-#show binlog events;
-connection master;
-DROP TABLE test.t1;
-sync_slave_with_master;
-
-# End of 5.0 test case
+--source suite/rpl/t/rpl_LD_INFILE.test
diff --git a/mysql-test/suite/engines/funcs/t/rpl_REDIRECT.test b/mysql-test/suite/engines/funcs/t/rpl_REDIRECT.test
deleted file mode 100644
index 078d1048794..00000000000
--- a/mysql-test/suite/engines/funcs/t/rpl_REDIRECT.test
+++ /dev/null
@@ -1,47 +0,0 @@
-#
-# Test of automatic redirection of queries to master/slave.
-#
-
-source include/master-slave.inc;
-# We disable this for now as PS doesn't handle redirection
---disable_ps_protocol
-
-#first, make sure the slave has had enough time to register
-save_master_pos;
-connection slave;
-sync_with_master;
-
-#discover slaves
-connection master;
---replace_result $MASTER_MYPORT MASTER_PORT
---replace_column 1 # 8 # 9 # 16 # 23 # 33 #
-SHOW SLAVE STATUS;
---replace_result $SLAVE_MYPORT SLAVE_PORT
-SHOW SLAVE HOSTS;
-rpl_probe;
-
-#turn on master/slave query direction auto-magic
-enable_rpl_parse;
-create table t1 ( n int);
-insert into t1 values (1),(2),(3),(4);
-disable_rpl_parse;
-save_master_pos;
-connection slave;
-sync_with_master;
-insert into t1 values(5);
-connection master;
-enable_rpl_parse;
-# The first of the queries will be sent to the slave, the second to the master.
-SELECT * FROM t1 ORDER BY n;
-SELECT * FROM t1 ORDER BY n;
-disable_rpl_parse;
-SELECT * FROM t1 ORDER BY n;
-connection slave;
-SELECT * FROM t1 ORDER BY n;
-
-# Cleanup
-connection master;
-drop table t1;
-sync_slave_with_master;
-
-# End of 4.1 tests
diff --git a/mysql-test/suite/engines/funcs/t/rpl_alter.test b/mysql-test/suite/engines/funcs/t/rpl_alter.test
index 576376a0264..12360f5a3e8 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_alter.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_alter.test
@@ -1,24 +1,2 @@
-source include/master-slave.inc;
---disable_warnings
-drop database if exists mysqltest;
---enable_warnings
-create database mysqltest;
+--source suite/rpl/t/rpl_alter.test
-create table mysqltest.t1 ( n int);
-alter table mysqltest.t1 add m int;
-insert into mysqltest.t1 values (1,2);
-create table mysqltest.t2 (n int);
-insert into mysqltest.t2 values (45);
-rename table mysqltest.t2 to mysqltest.t3, mysqltest.t1 to mysqltest.t2;
-save_master_pos;
-connection slave;
-sync_with_master;
-select * from mysqltest.t2;
-select * from mysqltest.t3;
-connection master;
-drop database mysqltest;
-save_master_pos;
-connection slave;
-sync_with_master;
-
-# End of 4.1 tests
diff --git a/mysql-test/suite/engines/funcs/t/rpl_alter_db.test b/mysql-test/suite/engines/funcs/t/rpl_alter_db.test
index 17ba06ce063..097eaf9071b 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_alter_db.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_alter_db.test
@@ -1,12 +1,2 @@
-source include/master-slave.inc;
-connection master;
-use mysql; # to be different from initial `test' db of mysqltest client
-alter database collate latin1_bin;
-save_master_pos;
+--source suite/rpl/t/rpl_alter_db.test
-connection slave;
-sync_with_master;
-
-# Restoring to the original state
-connection master;
-alter database collate latin1_swedish_ci;
diff --git a/mysql-test/suite/engines/funcs/t/rpl_bit.test b/mysql-test/suite/engines/funcs/t/rpl_bit.test
index 7f85313ae4c..822fae57f68 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_bit.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_bit.test
@@ -1,93 +1,2 @@
-#############################################################################
-# Original Author: JBM #
-# Original Date: Sept/15/2005 #
-#############################################################################
-# Test: To test the replication of the bit field #
-#############################################################################
-# Change Author: JBM
-# Change Date: 2006-01-16
-##########
+--source suite/rpl/t/rpl_bit.test
--- source include/master-slave.inc
-
-
-# Begin clean up test section
-connection master;
---disable_warnings
-DROP TABLE IF EXISTS test.t1;
---enable_warnings
-# End of cleanup
-
-# Begin test section 1
-
-CREATE TABLE test.t1 (
- dummyKey INTEGER NOT NULL,
- f01 TINYINT,
- f10 TINYINT,
- f12 TINYINT,
- f15 TINYINT,
- f16 TINYINT,
- f7 TINYINT,
- f9 TINYINT,
- f29 TINYINT,
- f0 TINYINT,
- fA1 TINYINT,
- C32 TINYINT,
- A42 TINYINT,
- CA3 TINYINT,
- A044 TINYINT,
- f001 TINYINT,
- A3002 TINYINT,
- fC003 TINYINT,
- CA300 TINYINT,
- A305 TINYINT,
- CA321 TINYINT,
- r001 TINYINT,
- bit1 BIT(6),
- bit2 BIT(6),
- bit3 BIT(6),
- State1 TINYINT,
- State2 TINYINT,
- State3 TINYINT,
- State4 TINYINT,
- SubState TINYINT,
- gState TINYINT,
- oSupp TINYINT,
- tSupp TINYINT,
- sSuppD TINYINT,
- mSuppf TINYINT,
- GSuppDf TINYINT,
- VNotSupp TINYINT,
- x034 TINYINT,
-PRIMARY KEY USING HASH (dummyKey) );
-
-LOCK TABLES test.t1 WRITE;
-INSERT INTO test.t1 VALUES (6,1,1,1,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,b'111111',b'111110',b'110101',4,5,5,5,5,5,5,5,5,5,3,2,1);
-INSERT INTO test.t1 VALUES (1,1,1,1,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,b'111111',b'000000',b'100100',4,5,5,5,5,5,5,5,5,5,3,2,1);
-INSERT INTO test.t1 VALUES (2,1,1,1,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,b'000000',b'101010',b'010101',4,5,5,5,5,5,5,5,5,5,3,2,1);
-INSERT INTO test.t1 VALUES (3,1,1,1,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,b'101010',b'111111',b'000000',4,5,5,5,5,5,5,5,5,5,3,2,1);
-INSERT INTO test.t1 VALUES (4,1,1,1,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,4,5,5,5,5,5,5,5,5,5,3,2,1);
-INSERT INTO test.t1 VALUES (5,1,1,1,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,4,5,5,5,5,5,5,5,5,5,3,2,1);
-INSERT INTO test.t1 VALUES (7,1,1,1,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,4,5,5,5,5,5,5,5,5,5,3,2,1);
-INSERT INTO test.t1 VALUES (8,1,1,1,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,4,5,5,5,5,5,5,5,5,5,3,2,1);
-UNLOCK TABLES;
-
-
-SELECT oSupp, sSuppD, GSuppDf, VNotSupp, x034 FROM test.t1;
-SELECT hex(bit1) FROM test.t1 ORDER BY bit1;
-SELECT hex(bit2) from test.t1 ORDER BY bit2;
-SELECT hex(bit3) from test.t1 ORDER BY bit3;
-save_master_pos;
-
-connection slave;
-sync_with_master;
-SELECT oSupp, sSuppD, GSuppDf, VNotSupp, x034 FROM test.t1;
-SELECT hex(bit1) FROM test.t1 ORDER BY bit1;
-SELECT hex(bit2) from test.t1 ORDER BY bit2;
-SELECT hex(bit3) from test.t1 ORDER BY bit3;
-
-connection master;
-DROP TABLE IF EXISTS test.t1;
-sync_slave_with_master;
-
-# End of 5.0 test case
diff --git a/mysql-test/suite/engines/funcs/t/rpl_bit_npk.test b/mysql-test/suite/engines/funcs/t/rpl_bit_npk.test
index 12b587919f9..d2214ada074 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_bit_npk.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_bit_npk.test
@@ -1,116 +1,2 @@
-#############################################################################
-# Original Author: JBM #
-# Original Date: Sept/15/2005 #
-#############################################################################
-# Test: To test the replication of the bit field #
-#############################################################################
+--source suite/rpl/t/rpl_bit_npk.test
--- source include/master-slave.inc
-
-
-# Begin clean up test section
-connection master;
---disable_warnings
-DROP TABLE IF EXISTS test.t1;
---enable_warnings
-# End of cleanup
-
-# Begin test section 1
-
-CREATE TABLE test.t1 (
- dummyKey INTEGER NOT NULL,
- f01 TINYINT,
- f10 TINYINT,
- f12 TINYINT,
- f15 TINYINT,
- f16 TINYINT,
- f7 TINYINT,
- f9 TINYINT,
- f29 TINYINT,
- f0 TINYINT,
- fA1 TINYINT,
- C32 TINYINT,
- A42 TINYINT,
- CA3 TINYINT,
- A044 TINYINT,
- f001 TINYINT,
- A3002 TINYINT,
- fC003 TINYINT,
- CA300 TINYINT,
- A305 TINYINT,
- CA321 TINYINT,
- r001 TINYINT,
- bit1 BIT(6),
- bit2 BIT(6),
- bit3 BIT(6),
- State1 TINYINT,
- State2 TINYINT,
- State3 TINYINT,
- State4 TINYINT,
- SubState TINYINT,
- gState TINYINT,
- oSupp TINYINT,
- tSupp TINYINT,
- sSuppD TINYINT,
- mSuppf TINYINT,
- GSuppDf TINYINT,
- VNotSupp TINYINT,
- x034 TINYINT);
-
-LOCK TABLES test.t1 WRITE;
-INSERT INTO test.t1 VALUES (6,1,1,1,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,b'111111',b'111110',b'110101',4,5,5,5,5,5,5,5,5,5,3,NULL,1);
-INSERT INTO test.t1 VALUES (1,1,1,1,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,b'111111',b'000000',b'100100',4,5,5,5,5,5,5,5,5,5,3,2,1);
-INSERT INTO test.t1 VALUES (2,1,1,1,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,b'000000',b'101010',b'010101',4,5,5,5,5,5,5,5,5,5,3,2,1);
-INSERT INTO test.t1 VALUES (3,1,1,1,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,b'101010',b'111111',b'000000',4,5,5,5,5,5,5,5,5,5,3,2,1);
-INSERT INTO test.t1 VALUES (4,1,1,1,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,b'0',1,1,4,5,5,5,5,5,5,5,5,5,3,2,1);
-INSERT INTO test.t1 VALUES (5,1,1,1,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,4,5,5,5,5,5,5,5,5,5,3,2,1);
-INSERT INTO test.t1 VALUES (7,1,1,1,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,4,5,5,5,5,5,5,5,5,5,3,2,1);
-INSERT INTO test.t1 VALUES (8,1,1,1,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,4,5,5,5,5,5,5,5,5,5,3,2,1);
-UNLOCK TABLES;
-
-UPDATE test.t1 set x034 = 50 where bit3 = b'000000';
-UPDATE test.t1 set VNotSupp = 33 where bit1 = b'0';
-SELECT oSupp, sSuppD, GSuppDf, VNotSupp, x034
- FROM test.t1
- ORDER BY oSupp, sSuppD, GSuppDf, VNotSupp, x034;
-SELECT hex(bit1) from test.t1 ORDER BY bit1;
-SELECT hex(bit2) from test.t1 ORDER BY bit2;
-SELECT hex(bit3) from test.t1 ORDER BY bit3;
-save_master_pos;
-
-connection slave;
-sync_with_master;
-SELECT oSupp, sSuppD, GSuppDf, VNotSupp, x034
- FROM test.t1
- ORDER BY oSupp, sSuppD, GSuppDf, VNotSupp, x034;
-SELECT hex(bit1) from test.t1 ORDER BY bit1;
-SELECT hex(bit2) from test.t1 ORDER BY bit2;
-SELECT hex(bit3) from test.t1 ORDER BY bit3;
-
-connection master;
-CREATE TABLE test.t2 (a INT, b BIT(1));
-INSERT INTO test.t2 VALUES (1, b'0');
-INSERT INTO test.t2 VALUES (1, b'1');
-UPDATE test.t2 SET a = 2 WHERE b = b'1';
-
-CREATE TABLE test.t3 (a INT, b INT);
-INSERT INTO test.t3 VALUES (1, NULL);
-INSERT INTO test.t3 VALUES (1, 0);
-UPDATE test.t3 SET a = 2 WHERE b = 0;
-
-SELECT a, hex(b) FROM test.t2 ORDER BY a,b;
-SELECT * FROM test.t3 ORDER BY a,b;
-save_master_pos;
-
-connection slave;
-sync_with_master;
-SELECT a, hex(b) FROM test.t2 ORDER BY a,b;
-SELECT * FROM test.t3 ORDER BY a,b;
-
-connection master;
-DROP TABLE IF EXISTS test.t1;
-DROP TABLE IF EXISTS test.t2;
-DROP TABLE IF EXISTS test.t3;
-sync_slave_with_master;
-
-# End of 5.0 test case
diff --git a/mysql-test/suite/engines/funcs/t/rpl_change_master.test b/mysql-test/suite/engines/funcs/t/rpl_change_master.test
index c031464c95e..e7e70bd2ac6 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_change_master.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_change_master.test
@@ -1,37 +1,2 @@
-# Verify that after CHANGE MASTER, replication (I/O thread and SQL
-# thread) restart from where SQL thread left, not from where
-# I/O thread left (some old bug fixed in 4.0.17)
+--source suite/rpl/t/rpl_change_master.test
-source include/master-slave.inc;
-
-connection master;
-# Make SQL slave thread advance a bit
-create table t1(n int);
-sync_slave_with_master;
-select * from t1;
-# Now stop it and make I/O slave thread be ahead
-stop slave sql_thread;
-connection master;
-insert into t1 values(1);
-insert into t1 values(2);
-save_master_pos;
-connection slave;
---real_sleep 3 # wait for I/O thread to have read updates
-stop slave;
---replace_result $MASTER_MYPORT MASTER_MYPORT
---replace_column 1 # 7 # 8 # 9 # 23 # 33 #
-show slave status;
-change master to master_user='root';
---replace_result $MASTER_MYPORT MASTER_MYPORT
---replace_column 1 # 7 # 8 # 9 # 23 # 33 #
-show slave status;
-start slave;
-sync_with_master;
-select * from t1;
-connection master;
-drop table t1;
-save_master_pos;
-connection slave;
-sync_with_master;
-
-# End of 4.1 tests
diff --git a/mysql-test/suite/engines/funcs/t/rpl_create_database.test b/mysql-test/suite/engines/funcs/t/rpl_create_database.test
index 70cff8daca2..8690de2a604 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_create_database.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_create_database.test
@@ -1,72 +1,2 @@
-#
-# Tests for replication of statements that manipulate databases.
-#
-# For this test file, we have a number of databases. All databases
-# with "greek" names will be replicated on the slave, while other names
-# (e.g., american) will not be replicated.
-#
+--source suite/rpl/t/rpl_create_database.test
-source include/master-slave.inc;
-
-# Bug#6391 (binlog-do-db rules ignored)
-# In this case, 'mysqltest_bob' should not be replicated to the slave.
---disable_warnings
-DROP DATABASE IF EXISTS mysqltest_prometheus;
-DROP DATABASE IF EXISTS mysqltest_sisyfos;
-DROP DATABASE IF EXISTS mysqltest_bob;
-sync_slave_with_master;
-# This database is not replicated
-DROP DATABASE IF EXISTS mysqltest_bob;
---enable_warnings
-
-connection master;
-CREATE DATABASE mysqltest_prometheus;
-CREATE DATABASE mysqltest_sisyfos;
-CREATE DATABASE mysqltest_bob;
-
-USE mysqltest_sisyfos;
-# These should be replicated
-CREATE TABLE t1 (b int);
-INSERT INTO t1 VALUES(1);
-
-USE mysqltest_bob;
-# These should *not* be replicated
-CREATE TABLE t2 (b int);
-INSERT INTO t2 VALUES(2);
-
-# Current database is now 'mysqltest_bob'
-# The following should be replicated
-ALTER DATABASE mysqltest_sisyfos CHARACTER SET latin1;
-
-USE mysqltest_sisyfos;
-# The following should *not* be replicated
-ALTER DATABASE mysqltest_bob CHARACTER SET latin1;
-
-SHOW DATABASES;
-sync_slave_with_master;
-SHOW DATABASES;
-
-connection master;
-DROP DATABASE IF EXISTS mysqltest_sisyfos;
-USE mysqltest_prometheus;
-CREATE TABLE t1 (a INT);
-INSERT INTO t1 VALUES (1);
-CREATE DATABASE mysqltest_sisyfos;
-USE mysqltest_sisyfos;
-CREATE TABLE t2 (a INT);
-let $VERSION=`select version()`;
-SHOW DATABASES;
-sync_slave_with_master;
-SHOW DATABASES;
-USE mysqltest_prometheus;
-SHOW TABLES;
-USE mysqltest_sisyfos;
-SHOW TABLES;
-
-connection master;
-DROP DATABASE IF EXISTS mysqltest_prometheus;
-DROP DATABASE IF EXISTS mysqltest_sisyfos;
-DROP DATABASE IF EXISTS mysqltest_bob;
-sync_slave_with_master;
-
-# End of 4.1 tests
diff --git a/mysql-test/suite/engines/funcs/t/rpl_do_grant.test b/mysql-test/suite/engines/funcs/t/rpl_do_grant.test
index 4e398114269..95e90ce1bc3 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_do_grant.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_do_grant.test
@@ -1,98 +1,2 @@
-# Works in statement-based and row-based binlogging.
-# Test that GRANT and other user management commands are replicated to the slave
+--source suite/rpl/t/rpl_do_grant.test
--- source include/master-slave.inc
-
-# do not be influenced by other tests.
-connection master;
-delete from mysql.user where user=_binary'rpl_do_grant';
-delete from mysql.db where user=_binary'rpl_do_grant';
-flush privileges;
-save_master_pos;
-connection slave;
-sync_with_master;
-# if these DELETE did nothing on the master, we need to do them manually on the
-# slave.
-delete from mysql.user where user=_binary'rpl_ignore_grant';
-delete from mysql.db where user=_binary'rpl_ignore_grant';
-flush privileges;
-
-# test replication of GRANT
-connection master;
-grant select on *.* to rpl_do_grant@localhost;
-grant drop on test.* to rpl_do_grant@localhost;
-save_master_pos;
-connection slave;
-sync_with_master;
-show grants for rpl_do_grant@localhost;
-
-# test replication of SET PASSWORD
-connection master;
-set password for rpl_do_grant@localhost=password("does it work?");
-save_master_pos;
-connection slave;
-sync_with_master;
-select password<>_binary'' from mysql.user where user=_binary'rpl_do_grant';
-
-# clear what we have done, to not influence other tests.
-connection master;
-delete from mysql.user where user=_binary'rpl_do_grant';
-delete from mysql.db where user=_binary'rpl_do_grant';
-flush privileges;
-sync_slave_with_master;
-# The mysql database is not replicated, so we have to do the deletes
-# manually on the slave as well.
-delete from mysql.user where user=_binary'rpl_do_grant';
-delete from mysql.db where user=_binary'rpl_do_grant';
-flush privileges;
-
-# End of 4.1 tests
-
-connection master;
---error 1141
-show grants for rpl_do_grant@localhost;
-connection slave;
---error 1141
-show grants for rpl_do_grant@localhost;
-
-connection master;
-create user rpl_do_grant@localhost;
-show grants for rpl_do_grant@localhost;
---error 1141
-show grants for rpl_do_grant2@localhost;
-sync_slave_with_master;
-show grants for rpl_do_grant@localhost;
---error 1141
-show grants for rpl_do_grant2@localhost;
-
-connection master;
-rename user rpl_do_grant@localhost to rpl_do_grant2@localhost;
-show grants for rpl_do_grant2@localhost;
-sync_slave_with_master;
-show grants for rpl_do_grant2@localhost;
-
-connection master;
-grant DELETE,INSERT on mysqltest1.* to rpl_do_grant2@localhost;
-show grants for rpl_do_grant2@localhost;
-sync_slave_with_master;
-show grants for rpl_do_grant2@localhost;
-
-connection master;
-revoke DELETE on mysqltest1.* from rpl_do_grant2@localhost;
-show grants for rpl_do_grant2@localhost;
-sync_slave_with_master;
-show grants for rpl_do_grant2@localhost;
-
-connection master;
-revoke all privileges, grant option from rpl_do_grant2@localhost;
-show grants for rpl_do_grant2@localhost;
-sync_slave_with_master;
-show grants for rpl_do_grant2@localhost;
-
-connection master;
-drop user rpl_do_grant2@localhost;
---error 1141
-show grants for rpl_do_grant2@localhost;
-sync_slave_with_master;
---error 1141
-show grants for rpl_do_grant2@localhost;
diff --git a/mysql-test/suite/engines/funcs/t/rpl_drop.test b/mysql-test/suite/engines/funcs/t/rpl_drop.test
index b38007a755f..7dda6987502 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_drop.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_drop.test
@@ -1,16 +1,2 @@
-# Testcase for BUG#4552 (DROP on two tables, one of which does not
-# exist, must be binlogged with a non-zero error code)
-source include/master-slave.inc;
---disable_warnings
-drop table if exists t1, t2;
---enable_warnings
-create table t1 (a int);
---error 1051
-drop table t1, t2;
-save_master_pos;
-connection slave;
-sync_with_master;
-
-# End of 4.1 tests
-
+--source suite/rpl/t/rpl_drop.test
diff --git a/mysql-test/suite/engines/funcs/t/rpl_drop_db.test b/mysql-test/suite/engines/funcs/t/rpl_drop_db.test
index fb26cc20f8a..54d5300be14 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_drop_db.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_drop_db.test
@@ -1,61 +1,2 @@
-# test case for BUG#4680 -- if there are extra files in the db directory
-# dropping the db on the master causes replication problems
+--source suite/rpl/t/rpl_drop_db.test
--- source include/master-slave.inc
-connection master;
-
---disable_warnings
-drop database if exists mysqltest1;
---enable_warnings
-create database mysqltest1;
-create table mysqltest1.t1 (n int);
-insert into mysqltest1.t1 values (1);
-select * from mysqltest1.t1 into outfile 'mysqltest1/f1.txt';
-create table mysqltest1.t2 (n int);
-create table mysqltest1.t3 (n int);
---replace_result \\ /
---error 1010
-drop database mysqltest1;
-use mysqltest1;
-show tables;
-
-# test the branch of the code that deals with the query buffer overflow
-
---disable_query_log
-let $1=50;
-while ($1)
-{
- eval create table mysqltest1.mysqltest_long_table_name$1 (n int);
- dec $1;
-}
---enable_query_log
-
---replace_result \\ /
---error 1010
-drop database mysqltest1;
-use mysqltest1;
-show tables;
-use test;
-create table t1 (n int);
-insert into t1 values (1234);
-sync_slave_with_master;
-
-connection slave;
-use mysqltest1;
-show tables;
-use test;
-select * from t1;
-drop table t1;
-
-#cleanup
-connection slave;
-stop slave;
-drop database mysqltest1;
-
-connection master;
-# Remove the "extra" file created above
---remove_file $MYSQLTEST_VARDIR/mysqld.1/data/mysqltest1/f1.txt
-drop database mysqltest1;
-
-use test;
-drop table t1;
diff --git a/mysql-test/suite/engines/funcs/t/rpl_dual_pos_advance.test b/mysql-test/suite/engines/funcs/t/rpl_dual_pos_advance.test
index 518fa9df885..bf5e84152bc 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_dual_pos_advance.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_dual_pos_advance.test
@@ -6,7 +6,7 @@
# of their server id).
# It also will test BUG#13861.
-source include/master-slave.inc;
+--source include/master-slave.inc
# set up "dual head"
@@ -18,7 +18,7 @@ connection master;
--replace_result $SLAVE_MYPORT SLAVE_PORT
eval change master to master_host="127.0.0.1",master_port=$SLAVE_MYPORT,master_user="root";
-start slave;
+--source include/start_slave.inc
# now we test it
@@ -68,7 +68,7 @@ sync_with_master;
# show tables;
-# start slave;
+# --source include/start_slave.inc
# BUG#13023 is that Exec_master_log_pos may stay too low "forever":
@@ -78,9 +78,7 @@ create table t4 (n int); # create 3 ignored events
create table t5 (n int);
create table t6 (n int);
-save_master_pos;
-connection slave;
-sync_with_master;
+--sync_slave_with_master
connection slave;
@@ -97,12 +95,11 @@ show tables;
# cleanup
-stop slave;
+--source include/stop_slave.inc
reset slave;
drop table t1,t4,t5,t6; # add t2 and t3 later
-save_master_pos;
-connection slave;
-sync_with_master;
+--sync_slave_with_master
# End of 4.1 tests
+--source include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/t/rpl_empty_master_crash.test b/mysql-test/suite/engines/funcs/t/rpl_empty_master_crash.test
deleted file mode 100644
index 707d1eca8c2..00000000000
--- a/mysql-test/suite/engines/funcs/t/rpl_empty_master_crash.test
+++ /dev/null
@@ -1,15 +0,0 @@
-source include/master-slave.inc;
-
---replace_column 1 # 8 # 9 # 16 # 23 # 33 #
-show slave status;
-
-#
-# Load table should not succeed on the master as this is not a slave
-#
---error 1218
-load table t1 from master;
-connection slave;
---error 1188
-load table t1 from master;
-
-# End of 4.1 tests
diff --git a/mysql-test/suite/engines/funcs/t/rpl_err_ignoredtable.test b/mysql-test/suite/engines/funcs/t/rpl_err_ignoredtable.test
index adf1526a657..1459b24799e 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_err_ignoredtable.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_err_ignoredtable.test
@@ -1,68 +1,2 @@
-# Test for
-# Bug #797: If a query is ignored on slave (replicate-ignore-table) the slave
-# still checks that it has the same error as on the master.
-##########################################################################
+--source suite/rpl/t/rpl_err_ignoredtable.test
--- source include/master-slave.inc
-
---disable_query_log
-call mtr.add_suppression("Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT");
---enable_query_log
-
-connection master;
-create table t1 (a int primary key);
-create table t4 (a int primary key);
-# generate an error that goes to the binlog
---error 1022, 1062, 1582
-insert into t1 values (1),(1);
-insert into t4 values (1),(2);
-save_master_pos;
-connection slave;
-# as the t1 table is ignored on the slave, the slave should be able to sync
-sync_with_master;
-# check that the table has been ignored, because otherwise the test is nonsense
-show tables like 't1';
-show tables like 't4';
-SELECT * FROM test.t4 ORDER BY a;
-connection master;
-drop table t1;
-save_master_pos;
-connection slave;
-sync_with_master;
-
-# Now test that even critical errors (connection killed)
-# are ignored if rules allow it.
-# The "kill" idea was copied from rpl000001.test.
-
-connection master1;
-select get_lock('crash_lock%20C', 10);
-
-connection master;
-create table t2 (a int primary key);
-insert into t2 values(1);
-create table t3 (id int);
-insert into t3 values(connection_id());
-send update t2 set a = a + 1 + get_lock('crash_lock%20C', 10);
-
-connection master1;
-real_sleep 2;
-select (@id := id) - id from t3;
-kill @id;
-drop table t2,t3;
-insert into t4 values (3),(4);
-connection master;
---error 0,1053,2013,1317
-reap;
-connection master1;
-save_master_pos;
-connection slave;
-sync_with_master;
-SELECT * FROM test.t4 ORDER BY a;
-
-connection master1;
-DROP TABLE test.t4;
-save_master_pos;
-connection slave;
-sync_with_master;
-# End of 4.1 tests
-# Adding comment for force manual merge 5.0 -> wl1012. delete me if needed
diff --git a/mysql-test/suite/engines/funcs/t/rpl_flushlog_loop.test b/mysql-test/suite/engines/funcs/t/rpl_flushlog_loop.test
index 2e481f5e5e7..b4742246264 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_flushlog_loop.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_flushlog_loop.test
@@ -6,31 +6,22 @@
# Start replication master -> slave
#
connection slave;
---disable_warnings
-stop slave;
---enable_warnings
+
+--source include/stop_slave.inc
+
--replace_result $MASTER_MYPORT MASTER_PORT
eval change master to master_host='127.0.0.1',master_user='root',
master_password='',master_port=$MASTER_MYPORT;
-start slave;
-
+--source include/start_slave.inc
#
# Start replication slave -> master
#
connection master;
---disable_warnings
-stop slave;
---enable_warnings
+
--replace_result $SLAVE_MYPORT SLAVE_PORT
eval change master to master_host='127.0.0.1',master_user='root',
master_password='',master_port=$SLAVE_MYPORT;
-start slave;
-
-#
-# Wait for start of slave IO and SQL threads
-#
-let $result_pattern= '%127.0.0.1%root%slave-bin.000001%slave-bin.000001%Yes%Yes%0%0%None%';
--- source $MYSQL_TEST_DIR/suite/engines/funcs/t/wait_slave_status.inc
+--source include/start_slave.inc
#
# Flush logs of slave
@@ -41,8 +32,6 @@ sleep 5;
#
# Show status of slave
#
---replace_result $SLAVE_MYPORT SLAVE_PORT
---replace_column 1 # 8 # 9 # 16 # 23 # 33 #
---vertical_results
-SHOW SLAVE STATUS;
-STOP SLAVE;
+--let $status_items= Slave_IO_Running, Slave_SQL_Running, Last_SQL_Errno, Last_SQL_Error, Exec_Master_Log_Pos
+--source include/stop_slave.inc
+--source include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/t/rpl_free_items.test b/mysql-test/suite/engines/funcs/t/rpl_free_items.test
index 043e84160b8..31d65396d36 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_free_items.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_free_items.test
@@ -1,22 +1,2 @@
-source include/master-slave.inc;
-create table t1 (a int);
-create table t2 (a int);
-disable_query_log;
-SET @query="INSERT INTO t2 SELECT * FROM t1 WHERE a REGEXP \"0\"";
-let $1 = 2000;
-while ($1)
-{
- eval SET @query=concat(@query, " OR a REGEXP '$1'");
- dec $1;
-}
-let $1=`select @query`;
-eval $1;
-enable_query_log;
-# I have seen the slave crash either now or at shutdown
-sync_slave_with_master;
-connection master;
-drop table t1;
-drop table t2;
-sync_slave_with_master;
+--source suite/rpl/t/rpl_free_items.test
-# End of 4.1 tests
diff --git a/mysql-test/suite/engines/funcs/t/rpl_get_lock.test b/mysql-test/suite/engines/funcs/t/rpl_get_lock.test
index c57e9313899..a9265c7f864 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_get_lock.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_get_lock.test
@@ -1,49 +1,2 @@
-source include/master-slave.inc;
+--source suite/rpl/t/rpl_get_lock.test
---disable_query_log
-call mtr.add_suppression("Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT");
---enable_query_log
-
-create table t1(n int);
---disable_warnings
-insert into t1 values(get_lock("lock",2));
---enable_warnings
-dirty_close master;
-connection master1;
-select get_lock("lock",2);
-
-select release_lock("lock");
-#ignore
-disable_query_log;
-let $1=2000;
-while ($1)
-{
- do get_lock("lock",2);
- do release_lock("lock");
- dec $1;
-}
-enable_query_log;
-save_master_pos;
-connection slave;
-sync_with_master;
-select get_lock("lock",3);
-select * from t1;
-# There is no point in testing REPLICATIION of the IS_*_LOCK
-# functions; slave does not run with the same concurrency context as
-# master (generally in slave we can't know that on master this lock
-# was already held by another connection and so that the the
-# get_lock() we're replicating timed out on master hence returned 0,
-# or that the is_free_lock() we're playing returned 0 etc.
-# But here all we do is test these functions outside of replication.
-select is_free_lock("lock"), is_used_lock("lock") = connection_id();
-explain extended select is_free_lock("lock"), is_used_lock("lock");
-# Check lock functions
-select is_free_lock("lock2");
-select is_free_lock(NULL);
-connection master1;
-drop table t1;
-save_master_pos;
-connection slave;
-sync_with_master;
-
-# End of 4.1 tests
diff --git a/mysql-test/suite/engines/funcs/t/rpl_ignore_grant.test b/mysql-test/suite/engines/funcs/t/rpl_ignore_grant.test
index 2e6e2ce9a31..234fec3361e 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_ignore_grant.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_ignore_grant.test
@@ -1,59 +1,2 @@
-# Test that GRANT is not replicated to the slave
-# when --replicate-wild-ignore-table=mysql.%
-# In BUG#980, this test would _randomly_ fail.
+--source suite/rpl/t/rpl_ignore_grant.test
-source include/master-slave.inc;
-
-# do not be influenced by other tests.
-connection master;
-delete from mysql.user where user=_binary'rpl_ignore_grant';
-delete from mysql.db where user=_binary'rpl_ignore_grant';
-flush privileges;
-save_master_pos;
-connection slave;
-sync_with_master;
-# as these DELETE were not replicated, we need to do them manually on the
-# slave.
-delete from mysql.user where user=_binary'rpl_ignore_grant';
-delete from mysql.db where user=_binary'rpl_ignore_grant';
-flush privileges;
-
-# test non-replication of GRANT
-connection master;
-grant select on *.* to rpl_ignore_grant@localhost;
-grant drop on test.* to rpl_ignore_grant@localhost;
-show grants for rpl_ignore_grant@localhost;
-save_master_pos;
-connection slave;
-sync_with_master;
---error 1141 #("no such grant for user")
-show grants for rpl_ignore_grant@localhost;
-# check it another way
-select count(*) from mysql.user where user=_binary'rpl_ignore_grant';
-select count(*) from mysql.db where user=_binary'rpl_ignore_grant';
-
-# test non-replication of SET PASSWORD
-# first force creation of the user on slave (because as the user does not exist
-# on slave, the SET PASSWORD may be replicated but silently do nothing; this is
-# not what we want; we want it to be not-replicated).
-grant select on *.* to rpl_ignore_grant@localhost;
-connection master;
-set password for rpl_ignore_grant@localhost=password("does it work?");
-save_master_pos;
-connection slave;
-sync_with_master;
-select password<>_binary'' from mysql.user where user=_binary'rpl_ignore_grant';
-
-# clear what we have done, to not influence other tests.
-connection master;
-delete from mysql.user where user=_binary'rpl_ignore_grant';
-delete from mysql.db where user=_binary'rpl_ignore_grant';
-flush privileges;
-save_master_pos;
-connection slave;
-sync_with_master;
-delete from mysql.user where user=_binary'rpl_ignore_grant';
-delete from mysql.db where user=_binary'rpl_ignore_grant';
-flush privileges;
-
-# End of 4.1 tests
diff --git a/mysql-test/suite/engines/funcs/t/rpl_ignore_revoke.test b/mysql-test/suite/engines/funcs/t/rpl_ignore_revoke.test
index 00171605a92..1a981652645 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_ignore_revoke.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_ignore_revoke.test
@@ -1,51 +1,2 @@
-# test verifies that REVOKE must not be replicated when
-# slave server starts with --replicate-wild-ignore-table=mysql.%
-# the option is set in rpl_ignore_revoke-slave.opt
-# The first part of BUG#9483 for GRANT is checked by
-# existed specific rpl_ignore_grant test case (BUG#980)
+--source suite/rpl/t/rpl_ignore_revoke.test
-
-source include/master-slave.inc;
-
-### CLEAN-UP: create an account and manually duplicate it on the slave
-
-connection master;
-grant select on *.* to 'user_foo'@'%' identified by 'user_foopass';
-revoke select on *.* from 'user_foo'@'%';
-select select_priv from mysql.user where user='user_foo' /* master:must be N */;
-
-sync_slave_with_master;
-#connection slave;
-grant select on *.* to 'user_foo'@'%' identified by 'user_foopass';
-revoke select on *.* from 'user_foo'@'%';
-select select_priv from mysql.user where user='user_foo' /* slave:must be N */;
-
-
-### TEST
-
-#connection slave;
-grant select on *.* to 'user_foo'@'%' identified by 'user_foopass';
-select select_priv from mysql.user where user='user_foo' /* slave:must be Y */;
-
-connection master;
-revoke select on *.* from 'user_foo';
-select select_priv from mysql.user where user='user_foo' /* master:must be N */;
-
-sync_slave_with_master;
-#connection slave;
-select select_priv from mysql.user where user='user_foo' /* slave:must get Y */;
-
-### CLEAN-UP
-
-connection slave;
---disable_abort_on_error
-revoke select on *.* FROM 'user_foo';
---enable_abort_on_error
-
-connection master;
-delete from mysql.user where user="user_foo";
-sync_slave_with_master;
-
-# Since changes to mysql.* are ignored, the revoke need to
-# be done on slave as well
-delete from mysql.user where user="user_foo";
diff --git a/mysql-test/suite/engines/funcs/t/rpl_ignore_table_update.test b/mysql-test/suite/engines/funcs/t/rpl_ignore_table_update.test
index fe030f90411..9ace16dd247 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_ignore_table_update.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_ignore_table_update.test
@@ -1,38 +1,2 @@
-# This one assumes we are ignoring updates on table mysqltest_foo, but doing
-# the ones on all other tables
+--source suite/rpl/t/rpl_ignore_table_update.test
-source include/master-slave.inc;
-connection slave;
-
-#
-# For this test we must be in the test database
-#
-use test;
-
---disable_warnings
-drop table if exists mysqltest_foo;
-drop table if exists mysqltest_bar;
---enable_warnings
-
-create table mysqltest_foo (n int);
-insert into mysqltest_foo values(4);
-connection master;
-use test;
-create table mysqltest_foo (n int);
-insert into mysqltest_foo values(5);
-create table mysqltest_bar (m int);
-insert into mysqltest_bar values(15);
-create table t1 (k int);
-insert into t1 values(55);
-save_master_pos;
-connection slave;
-sync_with_master;
-select mysqltest_foo.n,mysqltest_bar.m,t1.k from mysqltest_foo,mysqltest_bar,t1;
-connection master;
-drop table mysqltest_foo,mysqltest_bar,t1;
-save_master_pos;
-connection slave;
-sync_with_master;
-drop table mysqltest_foo,mysqltest_bar,t1;
-
-# End of 4.1 tests
diff --git a/mysql-test/suite/engines/funcs/t/rpl_init_slave.test b/mysql-test/suite/engines/funcs/t/rpl_init_slave.test
index 139b4902e12..85869ffbb99 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_init_slave.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_init_slave.test
@@ -1,34 +1,2 @@
-source include/master-slave.inc;
+--source suite/rpl/t/rpl_init_slave.test
-#
-# Test of init_slave variable
-#
-
-save_master_pos;
-connection slave;
-sleep 1;
-show variables like 'init_slave';
-show variables like 'max_connections';
-sync_with_master;
-reset master;
-connection master;
-show variables like 'init_slave';
-show variables like 'max_connections';
-save_master_pos;
-connection slave;
-sync_with_master;
-# Save variable value
-set @my_global_init_connect= @@global.init_connect;
-set global init_connect="set @c=1";
-show variables like 'init_connect';
-connection master;
-save_master_pos;
-connection slave;
-sync_with_master;
-stop slave;
-
-# Restore changed global variable
-set global init_connect= @my_global_init_connect;
-set global max_connections= default;
-
-# End of 4.1 tests
diff --git a/mysql-test/suite/engines/funcs/t/rpl_insert.test b/mysql-test/suite/engines/funcs/t/rpl_insert.test
index f57a6e226d1..1a5d5ecba54 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_insert.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_insert.test
@@ -1,44 +1,2 @@
---echo #
---echo # Bug#20821: INSERT DELAYED fails to write some rows to binlog
---echo #
+--source suite/rpl/t/rpl_insert.test
---source include/not_embedded.inc
---source include/not_windows.inc
---source include/master-slave.inc
-
---disable_warnings
-CREATE SCHEMA IF NOT EXISTS mysqlslap;
-USE mysqlslap;
---enable_warnings
-
-CREATE TABLE t1 (id INT, name VARCHAR(64));
-
-let $query = "INSERT INTO t1 VALUES (1, 'Dr. No'), (2, 'From Russia With Love'), (3, 'Goldfinger'), (4, 'Thunderball'), (5, 'You Only Live Twice')";
---exec $MYSQL_SLAP --silent --concurrency=5 --iterations=200 --query=$query --delimiter=";"
-
-# Wait until all the 5000 inserts has been inserted into the table
---disable_query_log
-let $counter= 300; # Max 30 seconds wait
-while (`select count(*)!=5000 from mysqlslap.t1`)
-{
- sleep 0.1;
- dec $counter;
- if (!$counter)
- {
- Number of records in t1 didnt reach 5000;
- }
-}
---enable_query_log
-
-SELECT COUNT(*) FROM mysqlslap.t1;
-sync_slave_with_master;
-SELECT COUNT(*) FROM mysqlslap.t1;
-
---echo #
---echo # Cleanup
---echo #
-
-connection master;
-USE test;
-DROP SCHEMA mysqlslap;
-sync_slave_with_master;
diff --git a/mysql-test/suite/engines/funcs/t/rpl_insert_select.test b/mysql-test/suite/engines/funcs/t/rpl_insert_select.test
index 677be526982..bc9ec9da636 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_insert_select.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_insert_select.test
@@ -1,6 +1,6 @@
# Testcase for BUG#10456 - INSERT INTO ... SELECT violating a primary key
# breaks replication
-
+-- source include/have_binlog_format_mixed_or_row.inc
-- source include/master-slave.inc
connection master;
@@ -10,10 +10,11 @@ create table t2 (n int);
insert into t2 values (1);
insert ignore into t1 select * from t2;
insert into t1 values (2);
-sync_slave_with_master;
+--sync_slave_with_master
connection slave;
select * from t1;
connection master;
drop table t1,t2;
-sync_slave_with_master;
+--sync_slave_with_master
+--source include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/t/rpl_loaddata2.test b/mysql-test/suite/engines/funcs/t/rpl_loaddata2.test
index 439c2b48ca5..b357054b4db 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_loaddata2.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_loaddata2.test
@@ -3,7 +3,7 @@
CREATE TABLE t1 (word CHAR(20) NOT NULL);
LOAD DATA INFILE '../../std_data/words.dat' INTO TABLE t1;
SELECT * FROM t1 ORDER BY word;
-sync_slave_with_master;
+--sync_slave_with_master
# Check
SELECT * FROM t1 ORDER BY word;
@@ -11,4 +11,5 @@ SELECT * FROM t1 ORDER BY word;
# Cleanup
connection master;
drop table t1;
-sync_slave_with_master;
+--sync_slave_with_master
+--source include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/t/rpl_loaddata_m.test b/mysql-test/suite/engines/funcs/t/rpl_loaddata_m.test
index 42c3ad99f33..901c40b5079 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_loaddata_m.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_loaddata_m.test
@@ -1,52 +1,2 @@
-# See if the master logs LOAD DATA INFILE correctly when binlog_*_db rules
-# exist.
-# This is for BUG#1100 (LOAD DATA INFILE was half-logged).
-######################################################
-# Change Author: JBM
-# Change Date: 2005-12-22
-# Change: Test rewritten to remove show binlog events
-# and to test the option better + Cleanup
-######################################################
--- source include/master-slave.inc
+--source suite/rpl/t/rpl_loaddata_m.test
---disable_warnings
-drop database if exists mysqltest;
---enable_warnings
-
-connection master;
-# 'test' database should be ignored by the slave
-USE test;
-CREATE TABLE t1(a INT, b INT, UNIQUE(b));
-LOAD DATA INFILE '../../std_data/rpl_loaddata.dat' INTO TABLE test.t1;
-SELECT COUNT(*) FROM test.t1;
-
-# 'mysqltest' database should NOT be ignored by the slave
-CREATE DATABASE mysqltest;
-USE mysqltest;
-CREATE TABLE t1(a INT, b INT, UNIQUE(b));
-LOAD DATA INFILE '../../std_data/rpl_loaddata.dat' INTO TABLE mysqltest.t1;
-SELECT COUNT(*) FROM mysqltest.t1;
-
-# Now lets check the slave to see what we have :-)
-save_master_pos;
-connection slave;
-sync_with_master;
-
-SHOW DATABASES;
-
-USE test;
-SHOW TABLES;
-
-USE mysqltest;
-SHOW TABLES;
-SELECT COUNT(*) FROM mysqltest.t1;
-
-#show binlog events;
-
-# Cleanup
-connection master;
-DROP DATABASE mysqltest;
-DROP TABLE test.t1;
-sync_slave_with_master;
-
-# End of test
diff --git a/mysql-test/suite/engines/funcs/t/rpl_loaddata_s.test b/mysql-test/suite/engines/funcs/t/rpl_loaddata_s.test
index a06df3bbfc9..7521f62403e 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_loaddata_s.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_loaddata_s.test
@@ -1,30 +1,2 @@
-# See if the slave logs (in its own binlog, with --log-slave-updates) a
-# replicated LOAD DATA INFILE correctly when it has binlog_*_db rules.
-# This is for BUG#1100 (LOAD DATA INFILE was half-logged).
+--source suite/rpl/t/rpl_loaddata_s.test
--- source include/have_binlog_format_mixed_or_statement.inc
--- source include/master-slave.inc
-
-connection slave;
-# Not sure why we connect to slave and then try to reset master, but I will leave it [JBM]
-reset master;
-
-connection master;
-# 'test' is the current database
-create table test.t1(a int, b int, unique(b));
-load data infile '../../std_data/rpl_loaddata.dat' into table test.t1;
-
-# Test logging on slave;
-
-save_master_pos;
-connection slave;
-sync_with_master;
-select count(*) from test.t1; # check that LOAD was replicated
---replace_column 2 # 5 #
---replace_regex /table_id: [0-9]+/table_id: #/
-show binlog events from 107; # should be nothing
-
-# Cleanup
-connection master;
-drop table test.t1;
-sync_slave_with_master;
diff --git a/mysql-test/suite/engines/funcs/t/rpl_loaddatalocal.test b/mysql-test/suite/engines/funcs/t/rpl_loaddatalocal.test
index e272be3c13b..36e863c4573 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_loaddatalocal.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_loaddatalocal.test
@@ -6,7 +6,7 @@
# only the first 4KB, 8KB or 16KB usually.
# - the loaded file's first line was not written entirely to the
# master's binlog (1st char was absent)
-source include/master-slave.inc;
+--source include/master-slave.inc
create table t1(a int);
let $1=10000;
@@ -26,15 +26,11 @@ truncate table t1;
--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
eval load data local infile '$MYSQLTEST_VARDIR/tmp/rpl_loaddatalocal.select_outfile' into table t1;
remove_file $MYSQLTEST_VARDIR/tmp/rpl_loaddatalocal.select_outfile;
-save_master_pos;
-connection slave;
-sync_with_master;
+--sync_slave_with_master
select a,count(*) from t1 group by a;
connection master;
drop table t1;
-save_master_pos;
-connection slave;
-sync_with_master;
+--sync_slave_with_master
# End of 4.1 tests
@@ -54,12 +50,9 @@ create table t1(a int primary key);
eval load data local infile '$MYSQLTEST_VARDIR/tmp/rpl_loaddatalocal.select_outfile' into table t1;
remove_file $MYSQLTEST_VARDIR/tmp/rpl_loaddatalocal.select_outfile;
SELECT * FROM t1 ORDER BY a;
-save_master_pos;
-connection slave;
-sync_with_master;
+--sync_slave_with_master
SELECT * FROM t1 ORDER BY a;
connection master;
drop table t1;
-save_master_pos;
-connection slave;
-sync_with_master;
+--sync_slave_with_master
+--source include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/t/rpl_loadfile.test b/mysql-test/suite/engines/funcs/t/rpl_loadfile.test
index 26235d89016..26829b265f1 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_loadfile.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_loadfile.test
@@ -41,7 +41,7 @@ CALL test.p1();
--enable_warnings
SELECT * FROM test.t1 ORDER BY blob_column;
save_master_pos;
-sync_slave_with_master;
+--sync_slave_with_master
connection slave;
SELECT * FROM test.t1 ORDER BY blob_column;
@@ -49,6 +49,6 @@ SELECT * FROM test.t1 ORDER BY blob_column;
connection master;
DROP PROCEDURE IF EXISTS test.p1;
DROP TABLE test.t1;
-sync_slave_with_master;
-
+--sync_slave_with_master
+--source include/rpl_end.inc
# End of 5.0 test case
diff --git a/mysql-test/suite/engines/funcs/t/rpl_log_pos.test b/mysql-test/suite/engines/funcs/t/rpl_log_pos.test
index 22deee6b5f3..53b38d9a2de 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_log_pos.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_log_pos.test
@@ -1,58 +1,2 @@
-##########
-# Change Author: JBM
-# Change Date: 2006-01-16
-##########
+--source suite/rpl/t/rpl_log_pos.test
-#
-# Testing of setting slave to wrong log position with master_log_pos
-#
-
-# Passes with rbr no problem, removed statement include [jbm]
-
-source include/master-slave.inc;
---replace_column 3 <Binlog_Ignore_DB>
-show master status;
-sync_slave_with_master;
---replace_result $MASTER_MYPORT MASTER_PORT
---replace_column 1 # 8 # 9 # 23 # 33 #
-show slave status;
-stop slave;
-change master to master_log_pos=107;
-start slave;
-sleep 5;
-stop slave;
-change master to master_log_pos=107;
---replace_result $MASTER_MYPORT MASTER_PORT
---replace_column 1 # 8 # 9 # 23 # 33 #
-show slave status;
-start slave;
-sleep 5;
---replace_result $MASTER_MYPORT MASTER_PORT
---replace_column 1 # 8 # 9 # 23 # 33 #
-show slave status;
-stop slave;
-change master to master_log_pos=178;
-start slave;
-sleep 2;
---replace_result $MASTER_MYPORT MASTER_PORT
---replace_column 1 # 8 # 9 # 23 # 33 #
-show slave status;
-connection master;
---replace_column 3 <Binlog_Ignore_DB>
-show master status;
-create table if not exists t1 (n int);
-drop table if exists t1;
-create table t1 (n int);
-insert into t1 values (1),(2),(3);
-save_master_pos;
-connection slave;
-stop slave;
-change master to master_log_pos=207;
-start slave;
-sync_with_master;
-select * from t1 ORDER BY n;
-connection master;
-drop table t1;
-sync_slave_with_master;
-
-# End of 4.1 tests
diff --git a/mysql-test/suite/engines/funcs/t/rpl_many_optimize.test b/mysql-test/suite/engines/funcs/t/rpl_many_optimize.test
index 91fab0b27a8..337cc1b36be 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_many_optimize.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_many_optimize.test
@@ -1,22 +1,2 @@
-# Test for BUG#7658 "optimize crashes slave thread (1 in 1000)]"
+--source suite/rpl/t/rpl_many_optimize.test
-source include/master-slave.inc;
-
-create table t1 (a int not null auto_increment primary key, b int, key(b));
-INSERT INTO t1 (a) VALUES (1),(2);
-# Now many OPTIMIZE to test if we crash (BUG#7658)
-let $1=300;
-disable_query_log;
-disable_result_log;
-while ($1)
-{
- eval OPTIMIZE TABLE t1;
- dec $1;
-}
-enable_result_log;
-enable_query_log;
-drop table t1;
-# Bug was that slave segfaulted after ~ a hundred of OPTIMIZE (or ANALYZE)
-sync_slave_with_master;
-
-# End of 4.1 tests
diff --git a/mysql-test/suite/engines/funcs/t/rpl_master_pos_wait.test b/mysql-test/suite/engines/funcs/t/rpl_master_pos_wait.test
deleted file mode 100644
index 893c8746efc..00000000000
--- a/mysql-test/suite/engines/funcs/t/rpl_master_pos_wait.test
+++ /dev/null
@@ -1,18 +0,0 @@
-# See if master_pos_wait(,,timeout)
-# Terminates with "timeout expired" (-1)
-source include/master-slave.inc;
-save_master_pos;
-connection slave;
-sync_with_master;
-# Ask for a master log that has certainly not been reached yet
-# timeout= 2 seconds
-select master_pos_wait('master-bin.999999',0,2);
-explain extended select master_pos_wait('master-bin.999999',0,2);
-# Testcase for bug 651 (master_pos_wait() hangs if slave idle and STOP SLAVE).
-send select master_pos_wait('master-bin.999999',0);
-connection slave1;
-stop slave sql_thread;
-connection slave;
-reap;
-
-# End of 4.1 tests
diff --git a/mysql-test/suite/engines/funcs/t/rpl_misc_functions.test b/mysql-test/suite/engines/funcs/t/rpl_misc_functions.test
index 6e4bedf7371..2f0786a9271 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_misc_functions.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_misc_functions.test
@@ -1,46 +1,2 @@
-#
-# Test of replicating some difficult functions
-#
-source include/master-slave.inc;
+--source suite/rpl/t/rpl_misc_functions.test
---disable_query_log
-call mtr.add_suppression("Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT");
---enable_query_log
-
-create table t1(id int, i int, r1 int, r2 int, p varchar(100));
-insert into t1 values(1, connection_id(), 0, 0, "");
-# don't put rand and password in the same query, to see if they replicate
-# independently
-# Pure rand test
---disable_warnings
-insert into t1 values(2, 0, rand()*1000, rand()*1000, "");
---enable_warnings
-# change the rand suite on the master (we do this because otherwise password()
-# benefits from the fact that the above rand() is well replicated :
-# it picks the same sequence element, which hides a possible bug in password() replication.
-set sql_log_bin=0;
-insert into t1 values(6, 0, rand(), rand(), "");
-delete from t1 where id=6;
-set sql_log_bin=1;
-# Pure password test
-insert into t1 values(3, 0, 0, 0, password('does_this_work?'));
-# "altogether now"
---disable_warnings
-insert into t1 values(4, connection_id(), rand()*1000, rand()*1000, password('does_this_still_work?'));
---enable_warnings
-select * into outfile '../../tmp/rpl_misc_functions.outfile' from t1;
-sync_slave_with_master;
-create table t2 like t1;
-# read the values from the master table
---replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
-eval load data local infile '$MYSQLTEST_VARDIR/tmp/rpl_misc_functions.outfile' into table t2;
-# compare them with the replica; the SELECT below should return no row
-select * from t1, t2 where (t1.id=t2.id) and not(t1.i=t2.i and t1.r1=t2.r1 and t1.r2=t2.r2 and t1.p=t2.p);
-stop slave;
-drop table t1;
-drop table t2;
-
-connection master;
-drop table t1;
-
-# End of 4.1 tests
diff --git a/mysql-test/suite/engines/funcs/t/rpl_multi_delete.test b/mysql-test/suite/engines/funcs/t/rpl_multi_delete.test
index a251cbf8833..53347fbf127 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_multi_delete.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_multi_delete.test
@@ -1,26 +1,2 @@
-source include/master-slave.inc;
-create table t1 (a int primary key);
-create table t2 (a int);
+--source suite/rpl/t/rpl_multi_delete.test
-insert into t1 values (1);
-insert into t2 values (1);
-
-
-delete t1.* from t1, t2 where t1.a = t2.a;
-
-save_master_pos;
-select * from t1;
-select * from t2;
-
-connection slave;
-sync_with_master;
-select * from t1;
-select * from t2;
-
-connection master;
-drop table t1,t2;
-save_master_pos;
-connection slave;
-sync_with_master;
-
-# End of 4.1 tests
diff --git a/mysql-test/suite/engines/funcs/t/rpl_multi_delete2.test b/mysql-test/suite/engines/funcs/t/rpl_multi_delete2.test
index e91fad1872a..17df048f930 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_multi_delete2.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_multi_delete2.test
@@ -1,68 +1,2 @@
-#multi delete replication bugs
-
-
-source include/master-slave.inc;
-
-#BUG#11139 - improper wild-table and table rules
-#checking for multi deletes with an alias
-
-connection master;
-set sql_log_bin=0;
-create database mysqltest_from;
-set sql_log_bin=1;
-
-connection slave;
-create database mysqltest_to;
-
-
-connection master;
-use mysqltest_from;
---disable_warnings
-drop table if exists a;
---enable_warnings
-CREATE TABLE a (i INT);
-INSERT INTO a VALUES(1);
-DELETE alias FROM a alias WHERE alias.i=1;
-SELECT * FROM a;
-insert into a values(2),(3);
-delete alias FROM a alias where alias.i=2;
-select * from a;
-save_master_pos;
-connection slave;
-
-use mysqltest_to;
-sync_with_master;
-select * from a;
-
-# BUG#3461
-connection master;
-create table t1 (a int primary key);
-create table t2 (a int);
-
-insert into t1 values (1);
-insert into t2 values (1);
-
-delete t1.* from t1, t2 where t1.a = t2.a;
-
-save_master_pos;
-select * from t1;
-select * from t2;
-
-connection slave;
-# BUG#3461 would cause sync to fail
-sync_with_master;
-error 1146;
-select * from t1;
-error 1146;
-select * from t2;
-
-# cleanup
-connection master;
-set sql_log_bin=0;
-drop database mysqltest_from;
-set sql_log_bin=1;
-connection slave;
-drop database mysqltest_to;
-
-# End of 4.1 tests
+--source suite/rpl/t/rpl_multi_delete2.test
diff --git a/mysql-test/suite/engines/funcs/t/rpl_multi_update4.test b/mysql-test/suite/engines/funcs/t/rpl_multi_update4.test
index 4991a385f6f..5538e6cba77 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_multi_update4.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_multi_update4.test
@@ -1,45 +1,2 @@
-# Let's verify that multi-update is not always skipped by slave if
-# some replicate-* rules exist.
-# (BUG#15699)
+--source suite/rpl/t/rpl_multi_update4.test
-source include/master-slave.inc;
-
-### Clean-up
-
-connection master;
---disable_warnings
-drop database if exists d1;
-drop database if exists d2;
-
-connection slave;
-drop database if exists d2;
---enable_warnings
-
-### Do on master
-
-connection master;
-create database d1; # accepted by slave
-create table d1.t0 (id int);
-create database d2; # ignored by slave
-use d2;
-create table t1 (id int);
-create table t2 (id int);
-insert into t1 values (1), (2), (3), (4), (5);
-insert into t2 select id + 3 from t1;
-# a problematic query which must be filter out by slave
-update t1 join t2 using (id) set t1.id = 0;
-insert into d1.t0 values (0); # replication works
-
-### Check on slave
-
-sync_slave_with_master;
-use d1;
-select * from t0 where id=0; # must find
-
-### Clean-up
-connection master;
-drop database d1;
-drop database d2;
-sync_slave_with_master;
-
-# End of test
diff --git a/mysql-test/suite/engines/funcs/t/rpl_ps.test b/mysql-test/suite/engines/funcs/t/rpl_ps.test
index 09c7b779f65..d40c5737912 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_ps.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_ps.test
@@ -1,49 +1,2 @@
-#
-# Test of replicating user variables
-#
-###########################################################
-source include/master-slave.inc;
+--source suite/rpl/t/rpl_ps.test
-#save_master_pos;
-#connection slave;
-#sync_with_master;
-#reset master;
-#connection master;
-
---disable_warnings
-drop table if exists t1;
---enable_warnings
-
-create table t1(n char(30));
-
-prepare stmt1 from 'insert into t1 values (?)';
-set @var1= "from-master-1";
-execute stmt1 using @var1;
-set @var1= "from-master-2-'',";
-execute stmt1 using @var1;
-SELECT * FROM t1 ORDER BY n;
-
-set @var2= 'insert into t1 values (concat("from-var-", ?))';
-prepare stmt2 from @var2;
-set @var1='from-master-3';
-execute stmt2 using @var1;
-
-save_master_pos;
-connection slave;
-sync_with_master;
-SELECT * FROM t1 ORDER BY n;
-
-connection master;
-
-drop table t1;
-
-save_master_pos;
-connection slave;
-sync_with_master;
-stop slave;
-
-# End of 4.1 tests
-
-reset master;
-reset slave;
-disconnect master;
diff --git a/mysql-test/suite/engines/funcs/t/rpl_rbr_to_sbr.test b/mysql-test/suite/engines/funcs/t/rpl_rbr_to_sbr.test
deleted file mode 100644
index f4e6239c679..00000000000
--- a/mysql-test/suite/engines/funcs/t/rpl_rbr_to_sbr.test
+++ /dev/null
@@ -1,47 +0,0 @@
--- source include/have_binlog_format_mixed.inc
--- source include/master-slave.inc
-
-# Test that the slave temporarily switches to ROW when seeing row
-# events when it is in MIXED mode
-
---echo **** On Master ****
-CREATE TABLE t1 (a INT, b LONG);
-INSERT INTO t1 VALUES (1,1), (2,2);
-INSERT INTO t1 VALUES (3,UUID()), (4,UUID());
-let $VERSION=`select version()`;
---replace_result $VERSION VERSION
---replace_column 2 # 5 #
---replace_regex /table_id: [0-9]+/table_id: #/
-# Different number of binlog events are generated by different engines
---disable_result_log
-SHOW BINLOG EVENTS;
---enable_result_log
-sync_slave_with_master;
---echo **** On Slave ****
---replace_result $MASTER_MYPORT MASTER_PORT
---replace_column 1 # 7 # 8 # 9 # 22 # 23 # 33 #
---query_vertical SHOW SLAVE STATUS
---replace_result $VERSION VERSION
---replace_column 2 # 5 #
---replace_regex /table_id: [0-9]+/table_id: #/
-# Different number of binlog events are generated by different engines
---disable_result_log
-SHOW BINLOG EVENTS;
---enable_result_log
---exec $MYSQL_DUMP --compact --order-by-primary --skip-extended-insert --no-create-info test > $MYSQLTEST_VARDIR/tmp/rpl_rbr_to_sbr_master.sql
---exec $MYSQL_DUMP_SLAVE --compact --order-by-primary --skip-extended-insert --no-create-info test > $MYSQLTEST_VARDIR/tmp/rpl_rbr_to_sbr_slave.sql
-
-connection master;
-DROP TABLE IF EXISTS t1;
-
-# Let's compare. Note: If they match test will pass, if they do not match
-# the test will show that the diff statement failed and not reject file
-# will be created. You will need to go to the mysql-test dir and diff
-# the files your self to see what is not matching
-
-diff_files $MYSQLTEST_VARDIR/tmp/rpl_rbr_to_sbr_master.sql $MYSQLTEST_VARDIR/tmp/rpl_rbr_to_sbr_slave.sql;
-
-# If all is good, we can remove the files
-
-remove_file $MYSQLTEST_VARDIR/tmp/rpl_rbr_to_sbr_master.sql;
-remove_file $MYSQLTEST_VARDIR/tmp/rpl_rbr_to_sbr_slave.sql;
diff --git a/mysql-test/suite/engines/funcs/t/rpl_relayspace.test b/mysql-test/suite/engines/funcs/t/rpl_relayspace.test
index 70315c14f34..5c60f57b496 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_relayspace.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_relayspace.test
@@ -1,34 +1,2 @@
-# The slave is started with relay_log_space_limit=10 bytes,
-# to force the deadlock after one event.
+--source suite/rpl/t/rpl_relayspace.test
-source include/master-slave.inc;
-connection slave;
-stop slave;
-connection master;
-# This will generate a master's binlog > 10 bytes
-create table t1 (a int);
-drop table t1;
-create table t1 (a int);
-drop table t1;
-connection slave;
-reset slave;
-start slave io_thread;
-# Give the I/O thread time to block.
-sleep 2;
-# A bug caused the I/O thread to refuse stopping.
-stop slave io_thread;
-reset slave;
-start slave;
-# The I/O thread stops filling the relay log when
-# it's >10b. And the SQL thread cannot purge this relay log
-# as purge is done only when the SQL thread switches to another
-# relay log, which does not exist here.
-# So we should have a deadlock.
-# if it is not resolved automatically we'll detect
-# it with master_pos_wait that waits for farther than 1Ob;
-# it will timeout after 10 seconds;
-# also the slave will probably not cooperate to shutdown
-# (as 2 threads are locked)
-select master_pos_wait('master-bin.001',200,6)=-1;
-
-# End of 4.1 tests
diff --git a/mysql-test/suite/engines/funcs/t/rpl_replicate_ignore_db.test b/mysql-test/suite/engines/funcs/t/rpl_replicate_ignore_db.test
index bcfef919fad..edadb14a3ee 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_replicate_ignore_db.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_replicate_ignore_db.test
@@ -1,30 +1,2 @@
-# see if --replicate-ignore-db works
+--source suite/rpl/t/rpl_replicate_ignore_db.test
---source include/master-slave.inc
-
---disable_warnings
-drop database if exists mysqltest1;
-drop database if exists mysqltest2;
---enable_warnings
-create database mysqltest1;
-create database mysqltest2;
-
-use mysqltest1;
-create table t1 (a int);
-insert into t1 values(1);
-sync_slave_with_master;
---error 1146
-select * from mysqltest1.t1;
-
-connection master;
-use mysqltest2;
-create table t1 (a int);
-insert into t1 values(1);
-sync_slave_with_master;
-select * from mysqltest2.t1;
-
-# cleanup
-connection master;
-drop database mysqltest1;
-drop database mysqltest2;
-sync_slave_with_master;
diff --git a/mysql-test/suite/engines/funcs/t/rpl_row_NOW.test b/mysql-test/suite/engines/funcs/t/rpl_row_NOW.test
index bf9576ae11b..b909062b8cc 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_row_NOW.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_row_NOW.test
@@ -1,74 +1,2 @@
-#############################################################################
-# Original Author: JBM #
-# Original Date: Aug/18/2005 #
-# Updated 08/30/2005 Added dumps and diff #
-#############################################################################
-#TEST: Taken and modfied from http://bugs.mysql.com/bug.php?id=12480 #
-#############################################################################
+--source suite/rpl/t/rpl_row_NOW.test
-# Includes
--- source include/have_binlog_format_row.inc
--- source include/master-slave.inc
-
-
-# Begin clean up test section
-connection master;
---disable_warnings
-create database if not exists mysqltest1;
-DROP TABLE IF EXISTS mysqltest1.t1;
---enable_warnings
-
-
-# Begin test section 1
-CREATE TABLE mysqltest1.t1 (n MEDIUMINT NOT NULL AUTO_INCREMENT,
- a TIMESTAMP DEFAULT '2005-05-05 01:01:01',
- b TIMESTAMP DEFAULT '2005-05-05 01:01:01',
- PRIMARY KEY(n));
-delimiter |;
-CREATE FUNCTION mysqltest1.f1() RETURNS TIMESTAMP
-BEGIN
- DECLARE v1 INT DEFAULT 300;
- WHILE v1 > 0 DO
- SET v1 = v1 - 1;
- END WHILE;
- RETURN NOW();
-END|
-delimiter ;|
-
-INSERT INTO mysqltest1.t1 VALUES(NULL,NOW(),mysqltest1.f1());
-
-delimiter |;
-CREATE TRIGGER mysqltest1.trig1 BEFORE INSERT ON mysqltest1.t1
-FOR EACH ROW BEGIN
- SET new.b = mysqltest1.f1();
-END|
-delimiter ;|
-
-INSERT INTO mysqltest1.t1 SET n = NULL, a = now();
-
-sync_slave_with_master;
-
-connection master;
-
---exec $MYSQL_DUMP --compact --order-by-primary --skip-extended-insert --no-create-info mysqltest1 > $MYSQLTEST_VARDIR/tmp/NOW_master.sql
---exec $MYSQL_DUMP_SLAVE --compact --order-by-primary --skip-extended-insert --no-create-info mysqltest1 > $MYSQLTEST_VARDIR/tmp/NOW_slave.sql
-
-# lets cleanup
-DROP TABLE IF EXISTS mysqltest1.t1;
-DROP FUNCTION mysqltest1.f1;
-
-# Lets compare. Note: If they match test will pass, if they do not match
-# the test will show that the diff statement failed and not reject file
-# will be created. You will need to go to the mysql-test dir and diff
-# the files your self to see what is not matching :-) The failed dump
-# files will be located in $MYSQLTEST_VARDIR/tmp
-
-diff_files $MYSQLTEST_VARDIR/tmp/NOW_master.sql $MYSQLTEST_VARDIR/tmp/NOW_slave.sql;
-
-# If all is good, when can cleanup our dump files.
-remove_file $MYSQLTEST_VARDIR/tmp/NOW_master.sql;
-remove_file $MYSQLTEST_VARDIR/tmp/NOW_slave.sql;
-
-DROP DATABASE mysqltest1;
-sync_slave_with_master;
-# End of 5.1 test case
diff --git a/mysql-test/suite/engines/funcs/t/rpl_row_USER.test b/mysql-test/suite/engines/funcs/t/rpl_row_USER.test
index c3639d05c48..e48b25293fa 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_row_USER.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_row_USER.test
@@ -1,57 +1,2 @@
-#############################################################################
-# Original Author: JBM #
-# Original Date: Aug/18/2005 #
-#############################################################################
-# TEST: To test the UUID() in rbr #
-#############################################################################
-# Change Author: JBM
-# Change Date: 2006-01-16
-##########
+--source suite/rpl/t/rpl_row_USER.test
-# Includes
--- source include/have_binlog_format_row.inc
--- source include/master-slave.inc
-
-# Begin clean up test section
-connection master;
---disable_warnings
-DROP DATABASE IF EXISTS mysqltest1;
-CREATE DATABASE mysqltest1;
---enable_warnings
-
-# Section 1 test
-CREATE USER tester IDENTIFIED BY 'test';
-GRANT ALL ON mysqltest1.* TO 'tester'@'%' IDENTIFIED BY 'test';
-GRANT ALL ON mysqltest1.* TO ''@'localhost%';
-FLUSH PRIVILEGES;
-connect (m_1,localhost,tester,,mysqltest1);
-
-connection m_1;
-CREATE TABLE mysqltest1.t1 (a INT, users VARCHAR(255), PRIMARY KEY(a));
-INSERT INTO mysqltest1.t1 VALUES(1,USER());
-INSERT INTO mysqltest1.t1 VALUES(2,CURRENT_USER());
-delimiter |;
-create procedure mysqltest1.p1()
-begin
- INSERT INTO mysqltest1.t1 VALUES(3,USER());
- INSERT INTO mysqltest1.t1 VALUES(4,CURRENT_USER());
-end|
-delimiter ;|
-
-CALL mysqltest1.p1();
-connection master;
-SELECT * FROM mysqltest1.t1 ORDER BY a;
-sync_slave_with_master;
-SELECT * FROM mysqltest1.t1 ORDER BY a;
-
-connection master;
-# Lets cleanup
-
-REVOKE ALL ON mysqltest1.* FROM 'tester'@'%';
-REVOKE ALL ON mysqltest1.* FROM ''@'localhost%';
-DROP DATABASE mysqltest1;
-DROP USER 'tester';
-DROP USER ''@'localhost%';
-FLUSH PRIVILEGES;
-
-# End of 5.0 test case
diff --git a/mysql-test/suite/engines/funcs/t/rpl_row_drop.test b/mysql-test/suite/engines/funcs/t/rpl_row_drop.test
index 20c217a7c3a..35090d7f32f 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_row_drop.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_row_drop.test
@@ -1,48 +1,2 @@
--- source include/have_binlog_format_row.inc
--- source include/master-slave.inc
+--source suite/rpl/t/rpl_row_drop.test
-# Bug#12415: DROP of temporary table on master stops slave
-connection master;
---echo **** On Master ****
-CREATE TABLE t1 (a int);
-CREATE TABLE t2 (a int);
-CREATE TEMPORARY TABLE t2 (a int, b int);
-SHOW TABLES;
-sync_slave_with_master;
---echo **** On Slave ****
-SHOW TABLES;
-connection master;
---echo **** On Master ****
-DROP TABLE t2; # Dropping the temporary table
-SHOW TABLES;
-sync_slave_with_master;
---echo **** On Slave ****
-SHOW TABLES; # There should be two tables on the slave
-
-connection master;
---echo **** On Master ****
-CREATE TEMPORARY TABLE t2 (a int, b int);
-SHOW TABLES;
-sync_slave_with_master;
---echo **** On Slave ****
-SHOW TABLES;
-connection master;
---echo **** On Master ****
-# Should drop the non-temporary table t1 and the temporary table t2
-DROP TABLE t1,t2;
-let $VERSION=`select version()`;
---replace_result $VERSION VERSION
---replace_regex /table_id: [0-9]+/table_id: #/
-SHOW BINLOG EVENTS;
-SHOW TABLES;
-sync_slave_with_master;
---echo **** On Slave ****
-SHOW TABLES;
-
---disable_query_log
---disable_warnings
-connection master;
-DROP TABLE IF EXISTS t2;
-sync_slave_with_master;
---enable_warnings
---enable_query_log
diff --git a/mysql-test/suite/engines/funcs/t/rpl_row_func001.test b/mysql-test/suite/engines/funcs/t/rpl_row_func001.test
index 53fb55118e6..7671d9947c1 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_row_func001.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_row_func001.test
@@ -1,57 +1,2 @@
-#############################################################################
-# This test is being created to test out the non deterministic items with #
-# row based replication. #
-# Original Author: JBM #
-# Original Date: Aug/10/2005 #
-# Update: 08/29/2005 change name to initials #
-#############################################################################
+--source suite/rpl/t/rpl_row_func001.test
-# Includes
--- source include/have_binlog_format_row.inc
--- source include/master-slave.inc
-
-
-# Begin clean up test section
-connection master;
---disable_warnings
---error 0,1305
-DROP FUNCTION test.f1;
-DROP TABLE IF EXISTS test.t1;
-
-
---enable_warnings
-
-# Section 1 test from bug #12487 Uses stored function to insert rows to see what is replicated.
-
-create table test.t1 (a int, PRIMARY KEY(a));
-
-delimiter //;
-create function test.f1(i int) returns int
-begin
-insert into test.t1 values(i);
-return 0;
-end//
-delimiter ;//
-
-select test.f1(1);
-select test.f1(2);
-select * from test.t1;
-
-save_master_pos;
-sync_slave_with_master;
-connection slave;
-#show create table test.t1;
-select * from test.t1;
-
-connection master;
-
-#Used for debugging
-#show binlog events;
-
-# Cleanup
-
-DROP FUNCTION test.f1;
-DROP TABLE test.t1;
-sync_slave_with_master;
-
-# End of 5.0 test case
diff --git a/mysql-test/suite/engines/funcs/t/rpl_row_inexist_tbl.test b/mysql-test/suite/engines/funcs/t/rpl_row_inexist_tbl.test
index 736071a8ece..793b94e63a6 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_row_inexist_tbl.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_row_inexist_tbl.test
@@ -2,7 +2,7 @@
# not have
--source include/have_binlog_format_row.inc
-source include/master-slave.inc;
+--source include/master-slave.inc
connection master;
create table t1 (a int not null primary key);
@@ -11,7 +11,7 @@ create table t2 (a int);
insert into t2 values (1);
update t1, t2 set t1.a = 0 where t1.a = t2.a;
-sync_slave_with_master;
+--sync_slave_with_master
# t2 should not have been replicated
# t1 should have been properly updated
show tables;
@@ -23,13 +23,16 @@ insert into t1 values (1);
connection slave;
# slave should have stopped because can't find table t1
-wait_for_slave_to_stop;
-# see if we have a good error message:
---replace_result $MASTER_MYPORT MASTER_MYPORT
---replace_column 1 # 7 # 8 # 9 # 22 # 23 # 33 #
---vertical_results
-show slave status;
+# 1146 = ER_NO_SUCH_TABLE
+call mtr.add_suppression("Slave SQL.*Error executing row event: .Table .test.t1. doesn.t exist., error.* 1146");
+--let $slave_sql_errno= 1146
+--source include/wait_for_slave_sql_error.inc
+
+--echo ==== Clean up ====
+--source include/stop_slave_io.inc
+RESET SLAVE;
-# cleanup
connection master;
drop table t1, t2;
+--let $rpl_only_running_threads= 1
+--source include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/t/rpl_row_max_relay_size.test b/mysql-test/suite/engines/funcs/t/rpl_row_max_relay_size.test
index 6df58122051..f7098d0d7bf 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_row_max_relay_size.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_row_max_relay_size.test
@@ -4,6 +4,6 @@
# Test of manual relay log rotation with FLUSH LOGS.
# Requires statement logging
-source include/have_binlog_format_row.inc;
+--source include/have_binlog_format_row.inc
-source suite/rpl/include/rpl_max_relay_size.test;
+--source suite/rpl/include/rpl_max_relay_size.test
diff --git a/mysql-test/suite/engines/funcs/t/rpl_row_sp001.test b/mysql-test/suite/engines/funcs/t/rpl_row_sp001.test
index ae6116bea4e..0610ef0274a 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_row_sp001.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_row_sp001.test
@@ -1,146 +1,2 @@
-#############################################################################
-# This test is being created to test out the non deterministic items with #
-# row based replication. #
-# Original Author: JBM #
-# Original Date: Aug/09/2005 #
-# Updated: Aug/29/2005
-#############################################################################
-# Test: Includes two stored procedure tests. First test uses SP to insert #
-# values from RAND() and NOW() into a table. #
-# The second test uses SP with CASE structure to decide what text #
-# to update a given table with. #
-############################################################################
+--source suite/rpl/t/rpl_row_sp001.test
-# Includes
--- source include/have_binlog_format_row.inc
--- source include/master-slave.inc
-
--- disable_query_log
--- disable_result_log
-
-# Begin clean up test section
-connection master;
---disable_warnings
-DROP PROCEDURE IF EXISTS test.p1;
-DROP PROCEDURE IF EXISTS test.p2;
-DROP TABLE IF EXISTS test.t1;
-DROP TABLE IF EXISTS test.t2;
-
--- enable_query_log
--- enable_result_log
-
-# Begin test section 1 for non deterministic SP
-let $message=<Begin test section 1 (non deterministic SP)>;
---source include/show_msg.inc
-
-create table test.t1 (n MEDIUMINT NOT NULL AUTO_INCREMENT, f FLOAT, d DATETIME, PRIMARY KEY(n));
-
-delimiter //;
-create procedure test.p1()
-begin
- INSERT INTO test.t1 (f,d) VALUES (RAND(),NOW());
-end//
-delimiter ;//
-
-# show binlog events;
-
--- disable_query_log
--- disable_result_log
-let $1=10;
-while ($1)
-{
- call test.p1();
- sleep 1;
- dec $1;
-}
--- enable_result_log
--- enable_query_log
-
-## Used for debugging
-#show binlog events;
-#select * from test.t1;
-#sync_slave_with_master;
-#select * from test.t1;
-#connection master;
-
-let $message=<End test section 1 (non deterministic SP)>;
---source include/show_msg.inc
-
-
-CREATE TABLE test.t2 (a INT NOT NULL AUTO_INCREMENT, t CHAR(4), PRIMARY KEY(a));
-
-delimiter //;
-CREATE PROCEDURE test.p2(n int)
-begin
-CASE n
-WHEN 1 THEN
- UPDATE test.t2 set t ='Tex';
-WHEN 2 THEN
- UPDATE test.t2 set t ='SQL';
-ELSE
- UPDATE test.t2 set t ='NONE';
-END CASE;
-end//
-delimiter ;//
-
-INSERT INTO test.t2 VALUES(NULL,'NEW'),(NULL,'NEW'),(NULL,'NEW'),(NULL,'NEW');
-
-SELECT * FROM t2 ORDER BY a;
-save_master_pos;
-connection slave;
-sync_with_master;
-SELECT * FROM t2 ORDER BY a;
-
-connection master;
-call test.p2(1);
-SELECT * FROM t2 ORDER BY a;
-sync_slave_with_master;
-SELECT * FROM t2 ORDER BY a;
-
-
-connection master;
-call test.p2(2);
-SELECT * FROM t2 ORDER BY a;
-save_master_pos;
-connection slave;
-sync_with_master;
-SELECT * FROM t2 ORDER BY a;
-
-connection master;
-call test.p2(3);
-SELECT * FROM t2 ORDER BY a;
-save_master_pos;
-connection slave;
-sync_with_master;
-SELECT * FROM t2 ORDER BY a;
-
-##Used for debugging
-#show binlog events;
-
-# time to dump the databases and so we can see if they match
-
---exec $MYSQL_DUMP --compact --order-by-primary --skip-extended-insert --no-create-info test > $MYSQLTEST_VARDIR/tmp/sp001_master.sql
---exec $MYSQL_DUMP_SLAVE --compact --order-by-primary --skip-extended-insert --no-create-info test > $MYSQLTEST_VARDIR/tmp/sp001_slave.sql
-
-# First lets cleanup
-
-connection master;
-DROP PROCEDURE test.p1;
-DROP PROCEDURE test.p2;
-DROP TABLE test.t1;
-DROP TABLE test.t2;
-sync_slave_with_master;
-
-# Let's compare. Note: if they match, the test will pass; if they do not match,
-# the test will show that the diff statement failed and no reject file
-# will be created. You will need to go to the mysql-test dir and diff
-# the files yourself to see what is not matching :-) Failed dump files
-# will be located in $MYSQLTEST_VARDIR/tmp
-
-diff_files $MYSQLTEST_VARDIR/tmp/sp001_master.sql $MYSQLTEST_VARDIR/tmp/sp001_slave.sql;
-
-# If all is good, we can clean up our dump files.
-remove_file $MYSQLTEST_VARDIR/tmp/sp001_master.sql;
-remove_file $MYSQLTEST_VARDIR/tmp/sp001_slave.sql;
-
-# End of 5.0 test case
diff --git a/mysql-test/suite/engines/funcs/t/rpl_row_sp005.test b/mysql-test/suite/engines/funcs/t/rpl_row_sp005.test
index d8a5aacc5e6..a304dbe009b 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_row_sp005.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_row_sp005.test
@@ -1,108 +1,2 @@
-#############################################################################
-# Original Author: JBM #
-# Original Date: Aug/15/2005 #
-# Updated: Aug/29/2005: Removed sleeps #
-#############################################################################
-# Test: Tests SPs with cursors, flow logic, and alter sp. In addition it #
-# tests SPs with insert and update operations. #
-#############################################################################
+--source suite/rpl/t/rpl_row_sp005.test
-# Includes
--- source include/have_binlog_format_row.inc
--- source include/master-slave.inc
-
-
-# Begin clean up test section
-connection master;
---disable_warnings
-DROP PROCEDURE IF EXISTS test.p1;
-DROP PROCEDURE IF EXISTS test.p2;
-DROP TABLE IF EXISTS test.t2;
-DROP TABLE IF EXISTS test.t1;
-DROP TABLE IF EXISTS test.t3;
---enable_warnings
-# End of cleanup
-
-# Begin test section 1
-CREATE TABLE IF NOT EXISTS test.t1(id INT, data CHAR(16),PRIMARY KEY(id));
-CREATE TABLE IF NOT EXISTS test.t2(id2 INT,PRIMARY KEY(id2));
-CREATE TABLE IF NOT EXISTS test.t3(id3 INT,PRIMARY KEY(id3), c CHAR(16));
-
-delimiter |;
-CREATE PROCEDURE test.p1()
-BEGIN
-DECLARE done INT DEFAULT 0;
- DECLARE spa CHAR(16);
- DECLARE spb,spc INT;
- DECLARE cur1 CURSOR FOR SELECT id,data FROM test.t1 ORDER BY id;
- DECLARE cur2 CURSOR FOR SELECT id2 FROM test.t2 ORDER BY id2;
- DECLARE CONTINUE HANDLER FOR SQLSTATE '02000' SET done = 1;
-
- OPEN cur1;
- OPEN cur2;
-
- REPEAT
- FETCH cur1 INTO spb, spa;
- FETCH cur2 INTO spc;
- IF NOT done THEN
- IF spb < spc THEN
- INSERT INTO test.t3 VALUES (spb,spa);
- ELSE
- INSERT INTO test.t3 VALUES (spc,spa);
- END IF;
- END IF;
- UNTIL done END REPEAT;
-
- CLOSE cur1;
- CLOSE cur2;
-END|
-CREATE PROCEDURE test.p2()
-BEGIN
- INSERT INTO test.t1 VALUES (4,'MySQL'),(20,'ROCKS'),(11,'Texas'),(10,'kyle');
- INSERT INTO test.t2 VALUES (4),(2),(1),(3);
- UPDATE test.t1 SET id=id+4 WHERE id=4;
-END|
-delimiter ;|
-
-let $message=< ---- Master selects-- >;
---source include/show_msg.inc
-CALL test.p2();
-SELECT * FROM test.t1 ORDER BY id;
-SELECT * FROM test.t2 ORDER BY id2;
-
-let $message=< ---- Slave selects-- >;
---source include/show_msg.inc
-save_master_pos;
-connection slave;
-sync_with_master;
-SELECT * FROM test.t1 ORDER BY id;
-SELECT * FROM test.t2 ORDER BY id2;
-
-let $message=< ---- Master selects-- >;
---source include/show_msg.inc
-connection master;
-CALL test.p1();
-sleep 6;
-SELECT * FROM test.t3 ORDER BY id3;
-
-let $message=< ---- Slave selects-- >;
---source include/show_msg.inc
-connection slave;
-SELECT * FROM test.t3 ORDER BY id3;
-
-connection master;
-
-ALTER PROCEDURE test.p1 MODIFIES SQL DATA;
-#show binlog events;
-
-# Cleanup
-
-connection master;
-DROP PROCEDURE IF EXISTS test.p1;
-DROP PROCEDURE IF EXISTS test.p2;
-DROP TABLE IF EXISTS test.t1;
-DROP TABLE IF EXISTS test.t2;
-DROP TABLE IF EXISTS test.t3;
-sync_slave_with_master;
-
-# End of 5.0 test case
diff --git a/mysql-test/suite/engines/funcs/t/rpl_row_sp008.test b/mysql-test/suite/engines/funcs/t/rpl_row_sp008.test
index b1295820c99..3cc0ed26ea2 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_row_sp008.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_row_sp008.test
@@ -1,57 +1,2 @@
-#############################################################################
-# Original Author: JBM #
-# Original Date: Aug/15/2005 #
-# Update: 08/29/2005 Remove sleep #
-#############################################################################
-# TEST: Use SQL_CALC_FOUND_ROWS and insert results into a table inside a sp #
-#############################################################################
+--source suite/rpl/t/rpl_row_sp008.test
-
-# Includes
--- source include/have_binlog_format_row.inc
--- source include/master-slave.inc
-
-# Begin clean up test section
-connection master;
---disable_warnings
-DROP PROCEDURE IF EXISTS test.p1;
-DROP TABLE IF EXISTS test.t2;
---enable_warnings
-# End of cleanup
-
-
-
-# Begin test section 1
-CREATE TABLE test.t1 (a INT,PRIMARY KEY(a));
-CREATE TABLE test.t2 (a INT,PRIMARY KEY(a));
-INSERT INTO test.t1 VALUES(1),(2);
-
-delimiter |;
-CREATE PROCEDURE test.p1()
-BEGIN
- SELECT SQL_CALC_FOUND_ROWS * FROM test.t1 LIMIT 1;
- INSERT INTO test.t2 VALUES(FOUND_ROWS());
-END|
-delimiter ;|
-
-let $message=< ---- Master selects-- >;
---source include/show_msg.inc
-CALL test.p1();
-SELECT * FROM test.t2;
-
-let $message=< ---- Slave selects-- >;
---source include/show_msg.inc
-save_master_pos;
-connection slave;
-sync_with_master;
-SELECT * FROM test.t2;
-
-# Cleanup
-
-connection master;
-DROP PROCEDURE IF EXISTS test.p1;
-DROP TABLE IF EXISTS test.t1;
-DROP TABLE IF EXISTS test.t2;
-sync_slave_with_master;
-
-# End of 5.0 test case
diff --git a/mysql-test/suite/engines/funcs/t/rpl_row_sp009.test b/mysql-test/suite/engines/funcs/t/rpl_row_sp009.test
index 505ed582ba9..56b42a0953d 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_row_sp009.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_row_sp009.test
@@ -1,102 +1,2 @@
-#############################################################################
-# Original Author: JBM #
-# Original Date: Aug/18/2005 #
-# Updated: 08/29/2005 removed sleeps and added master pos save and sync #
-#############################################################################
-#TEST: Taken and modified from http://bugs.mysql.com/bug.php?id=12168 #
-#############################################################################
+--source suite/rpl/t/rpl_row_sp009.test
-# Includes
--- source include/have_binlog_format_row.inc
--- source include/master-slave.inc
-
-
-# Begin clean up test section
-connection master;
---disable_warnings
-DROP PROCEDURE IF EXISTS test.p1;
-DROP TABLE IF EXISTS test.t1;
-DROP TABLE IF EXISTS test.t2;
-
-
-# Begin test section 1
-CREATE TABLE test.t1 (a INT, PRIMARY KEY(a));
-INSERT INTO test.t1 VALUES (1),(2),(3),(4);
-CREATE TABLE test.t2 (a INT, PRIMARY KEY(a));
-
-delimiter |;
-CREATE PROCEDURE test.p1 (arg1 CHAR(1))
-BEGIN
- DECLARE b, c INT;
- IF arg1 = 'a' THEN
- BEGIN
- DECLARE cur1 CURSOR FOR SELECT A FROM test.t1 WHERE a % 2;
- DECLARE continue handler for not found set b = 1;
- SET b = 0;
- OPEN cur1;
- c1_repeat: REPEAT
- FETCH cur1 INTO c;
- IF (b = 1) THEN
- LEAVE c1_repeat;
- END IF;
-
- INSERT INTO test.t2 VALUES (c);
- UNTIL b = 1
- END REPEAT;
- CLOSE cur1;
- END;
- END IF;
- IF arg1 = 'b' THEN
- BEGIN
- DECLARE cur2 CURSOR FOR SELECT a FROM test.t1 WHERE NOT a % 2;
- DECLARE continue handler for not found set b = 1;
- SET b = 0;
- OPEN cur2;
- c2_repeat: REPEAT
- FETCH cur2 INTO c;
- IF (b = 1) THEN
- LEAVE c2_repeat;
- END IF;
-
- INSERT INTO test.t2 VALUES (c);
- UNTIL b = 1
- END REPEAT;
- CLOSE cur2;
- END;
- END IF;
-END|
-delimiter ;|
-
-CALL test.p1('a');
-SELECT * FROM test.t2 ORDER BY a;
-save_master_pos;
-connection slave;
-sync_with_master;
-SELECT * FROM test.t2 ORDER BY a;
-connection master;
-truncate test.t2;
-
-# this next call fails, but should not
-call test.p1('b');
-select * from test.t2 ORDER BY a;
-save_master_pos;
-connection slave;
-sync_with_master;
-SELECT * FROM test.t2 ORDER BY a;
-
-connection master;
-truncate test.t2;
-SELECT * FROM test.t2 ORDER BY a;
-save_master_pos;
-connection slave;
-sync_with_master;
-SELECT * FROM test.t2 ORDER BY a;
-
-# Cleanup
-connection master;
-DROP PROCEDURE test.p1;
-DROP TABLE test.t1;
-DROP TABLE test.t2;
-sync_slave_with_master;
-
-# End of 5.0 test case
diff --git a/mysql-test/suite/engines/funcs/t/rpl_row_sp010.test b/mysql-test/suite/engines/funcs/t/rpl_row_sp010.test
index 28b82217517..a52fc311315 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_row_sp010.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_row_sp010.test
@@ -1,80 +1,2 @@
-#############################################################################
-# Original Author: JBM #
-# Original Date: Aug/18/2005 #
-# Update: 08/29/2005 remove sleep added master pos save and sync #
-#############################################################################
-#TEST: Taken and modified from http://bugs.mysql.com/bug.php?id=11126 #
-#############################################################################
+--source suite/rpl/t/rpl_row_sp010.test
-# Includes
--- source include/have_binlog_format_row.inc
--- source include/master-slave.inc
-
-
-# Begin clean up test section
-connection master;
---disable_warnings
-DROP PROCEDURE IF EXISTS test.p1;
-DROP PROCEDURE IF EXISTS test.p2;
-DROP PROCEDURE IF EXISTS test.p3;
-DROP PROCEDURE IF EXISTS test.p4;
-DROP TABLE IF EXISTS test.t1;
-DROP TABLE IF EXISTS test.t2;
-
-
-# Begin test section 1
-delimiter |;
-CREATE PROCEDURE test.p1()
-BEGIN
- INSERT INTO test.t1 VALUES(2);
-END|
-CREATE PROCEDURE test.p2()
-BEGIN
- DROP TEMPORARY TABLE IF EXISTS test.t1;
- CREATE TEMPORARY TABLE test.t1 (a int, PRIMARY KEY(a));
- INSERT INTO test.t1 VALUES(1);
- CALL test.p1();
-END|
-delimiter ;|
-CALL test.p2();
-SELECT * FROM test.t1 ORDER BY a;
-
-save_master_pos;
-connection slave;
-sync_with_master;
-show tables;
-
-connection master;
-delimiter |;
-CREATE PROCEDURE test.p3()
-BEGIN
- INSERT INTO test.t2 VALUES(7);
-END|
-CREATE PROCEDURE test.p4()
-BEGIN
- DROP TABLE IF EXISTS test.t2;
- CREATE TABLE test.t2 (a int, PRIMARY KEY(a));
- INSERT INTO test.t2 VALUES(6);
- CALL test.p3();
-END|
-delimiter ;|
-CALL test.p4();
-SELECT * FROM test.t2 ORDER BY a;
-
-save_master_pos;
-connection slave;
-sync_with_master;
-SELECT * FROM test.t2 ORDER BY a;
-
-# Cleanup
-connection master;
-#show binlog events;
-DROP PROCEDURE IF EXISTS test.p1;
-DROP PROCEDURE IF EXISTS test.p2;
-DROP PROCEDURE IF EXISTS test.p3;
-DROP PROCEDURE IF EXISTS test.p4;
-DROP TABLE IF EXISTS test.t1;
-DROP TABLE IF EXISTS test.t2;
-sync_slave_with_master;
-
-# End of 5.0 test case
diff --git a/mysql-test/suite/engines/funcs/t/rpl_row_sp011.test b/mysql-test/suite/engines/funcs/t/rpl_row_sp011.test
index 1c24904dd5a..c24282c8f16 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_row_sp011.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_row_sp011.test
@@ -1,111 +1,2 @@
-#############################################################################
-# Original Author: JBM #
-# Original Date: Aug/18/2005 #
-# Updated: 08/29/2005 turned on diff and commented out debug SQL statements#
-#############################################################################
-#TEST: SP to test alter table and nested SP calls #
-#############################################################################
+--source suite/rpl/t/rpl_row_sp011.test
-# Includes
--- source include/have_binlog_format_row.inc
--- source include/master-slave.inc
-
-
-# Begin clean up test section
-connection master;
---disable_warnings
-DROP PROCEDURE IF EXISTS test.p1;
-DROP PROCEDURE IF EXISTS test.p2;
-DROP PROCEDURE IF EXISTS test.p3;
-DROP PROCEDURE IF EXISTS test.p4;
-DROP PROCEDURE IF EXISTS test.p5;
-DROP PROCEDURE IF EXISTS test.p6;
-DROP PROCEDURE IF EXISTS test.p7;
-DROP TABLE IF EXISTS test.t1;
-DROP TABLE IF EXISTS test.t2;
-
-
-# Begin test section 1
-CREATE TABLE test.t1 (a int, PRIMARY KEY(a));
-INSERT INTO test.t1 VALUES (1);
-
-delimiter |;
-CREATE PROCEDURE test.p1()
-BEGIN
- ALTER TABLE test.t1 ADD COLUMN b CHAR(4) AFTER a;
- UPDATE test.t1 SET b = 'rbr' WHERE a = 1;
- CALL test.p2();
-END|
-CREATE PROCEDURE test.p2()
-BEGIN
- ALTER TABLE test.t1 ADD COLUMN f FLOAT AFTER b;
- UPDATE test.t1 SET f = RAND() WHERE a = 1;
- CALL test.p3();
-END|
-CREATE PROCEDURE test.p3()
-BEGIN
- ALTER TABLE test.t1 RENAME test.t2;
- CALL test.p4();
-END|
-CREATE PROCEDURE test.p4()
-BEGIN
- ALTER TABLE test.t2 ADD INDEX (f);
- ALTER TABLE test.t2 CHANGE a a INT UNSIGNED NOT NULL AUTO_INCREMENT;
- INSERT INTO test.t2 VALUES (NULL,'TEST',RAND());
- CALL test.p5();
-END|
-CREATE PROCEDURE test.p5()
-BEGIN
- ALTER TABLE test.t2 ORDER BY f;
- INSERT INTO test.t2 VALUES (NULL,'STM',RAND());
- CALL test.p6();
-END|
-CREATE PROCEDURE test.p6()
-BEGIN
- ALTER TABLE test.t2 ADD COLUMN b2 CHAR(4) FIRST;
- ALTER TABLE test.t2 ADD COLUMN to_drop BIT(8) AFTER b2;
- INSERT INTO test.t2 VALUES ('new',1,NULL,'STM',RAND());
- CALL test.p7();
-END|
-CREATE PROCEDURE test.p7()
-BEGIN
- ALTER TABLE test.t2 DROP COLUMN to_drop;
- INSERT INTO test.t2 VALUES ('gone',NULL,'STM',RAND());
-END|
-delimiter ;|
-CALL test.p1();
-
-#SELECT * FROM test.t2;
-sync_slave_with_master;
-#SELECT * FROM test.t2;
-
---exec $MYSQL_DUMP --compact --order-by-primary --skip-extended-insert --no-create-info test > $MYSQLTEST_VARDIR/tmp/sp011_master.sql
---exec $MYSQL_DUMP_SLAVE --compact --order-by-primary --skip-extended-insert --no-create-info test > $MYSQLTEST_VARDIR/tmp/sp011_slave.sql
-
-# Cleanup
-connection master;
-#show binlog events;
-DROP PROCEDURE IF EXISTS test.p1;
-DROP PROCEDURE IF EXISTS test.p2;
-DROP PROCEDURE IF EXISTS test.p3;
-DROP PROCEDURE IF EXISTS test.p4;
-DROP PROCEDURE IF EXISTS test.p5;
-DROP PROCEDURE IF EXISTS test.p6;
-DROP PROCEDURE IF EXISTS test.p7;
-DROP TABLE IF EXISTS test.t1;
-DROP TABLE IF EXISTS test.t2;
-sync_slave_with_master;
-
-# Let's compare. Note: if they match, the test will pass; if they do not match,
-# the test will show that the diff statement failed and no reject file
-# will be created. You will need to go to the mysql-test dir and diff
-# the files yourself to see what is not matching :-) A failed test
-# will leave dump files in $MYSQLTEST_VARDIR/tmp
-
-diff_files $MYSQLTEST_VARDIR/tmp/sp011_master.sql $MYSQLTEST_VARDIR/tmp/sp011_slave.sql;
-
-# If all is good, we can clean up our dump files.
-remove_file $MYSQLTEST_VARDIR/tmp/sp011_master.sql;
-remove_file $MYSQLTEST_VARDIR/tmp/sp011_slave.sql;
-
-# End of 5.0 test case
diff --git a/mysql-test/suite/engines/funcs/t/rpl_row_sp012.test b/mysql-test/suite/engines/funcs/t/rpl_row_sp012.test
index f3963c7537a..f23e5c905a6 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_row_sp012.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_row_sp012.test
@@ -1,75 +1,2 @@
-#############################################################################
-# Original Author: JBM #
-# Original Date: Aug/22/2005 #
-# Update: 08/29/2005 Added save pos and sync #
-#############################################################################
-#TEST: SP to test security and current_user and user #
-#############################################################################
-
-
-# Includes
--- source include/have_binlog_format_row.inc
--- source include/not_embedded.inc
--- source include/master-slave.inc
-
-
-# Begin clean up test section
-connection master;
---disable_warnings
-DROP PROCEDURE IF EXISTS test.p1;
-DROP PROCEDURE IF EXISTS test.p2;
-DROP PROCEDURE IF EXISTS test.p3;
-
-
-# Begin test section 1
-# Create user user1 with no particular access rights
-grant usage on *.* to user1@localhost;
-flush privileges;
-
-SELECT CURRENT_USER();
-SELECT USER();
-CREATE PROCEDURE test.p1 () SQL SECURITY INVOKER SELECT CURRENT_USER(), USER();
-CREATE PROCEDURE test.p2 () SQL SECURITY DEFINER CALL test.p1();
-CREATE PROCEDURE test.p3 () SQL SECURITY INVOKER CALL test.p1();
-GRANT EXECUTE ON PROCEDURE p1 TO user1@localhost;
-GRANT EXECUTE ON PROCEDURE p2 TO user1@localhost;
-GRANT EXECUTE ON PROCEDURE p3 TO user1@localhost;
-
-# Need to wait for the rights to be applied at the slave
-sync_slave_with_master;
-
-let $message=<******** Master user1 p3 & p2 calls *******>;
---source include/show_msg.inc
-connect (muser1,localhost,user1,,);
-connection muser1;
-SELECT CURRENT_USER();
-SELECT USER();
-CALL test.p3();
-CALL test.p2();
-
-let $message=<******** Slave user1 p3 & p2 calls *******>;
---source include/show_msg.inc
-connect (suser1,127.0.0.1,user1,,test,$SLAVE_MYPORT,);
-
-connection master;
-save_master_pos;
-connection suser1;
-sync_with_master;
-
-SELECT CURRENT_USER();
-SELECT USER();
-CALL test.p3();
-CALL test.p2();
-
-# Cleanup
-connection master;
-DROP PROCEDURE IF EXISTS test.p1;
-DROP PROCEDURE IF EXISTS test.p3;
-DROP PROCEDURE IF EXISTS test.p2;
-DROP TABLE IF EXISTS test.t1;
-DROP TABLE IF EXISTS test.t2;
-DROP USER 'user1'@'localhost';
-sync_slave_with_master;
-
-# End of 5.0 test case
+--source suite/rpl/t/rpl_row_sp012.test
diff --git a/mysql-test/suite/engines/funcs/t/rpl_row_stop_middle.test b/mysql-test/suite/engines/funcs/t/rpl_row_stop_middle.test
index da363736100..397bc9a1d61 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_row_stop_middle.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_row_stop_middle.test
@@ -9,8 +9,8 @@
connection master;
create table t1 (a int not null auto_increment primary key, b int, key(b));
-sync_slave_with_master;
-stop slave;
+--sync_slave_with_master
+--source include/stop_slave.inc
connection master;
INSERT INTO t1 (a) VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10);
INSERT INTO t1 (a) SELECT null FROM t1;
@@ -28,13 +28,13 @@ INSERT INTO t1 (a) SELECT null FROM t1;
INSERT INTO t1 (a) SELECT null FROM t1;
connection slave;
-start slave;
+--source include/start_slave.inc
# hope one second is not enough for slave to reach the last
# Rows_log_event, so that test actually tests something.
real_sleep 1;
-stop slave;
+--source include/stop_slave.inc
# see if slave hangs on DROP TABLE
diff --git a/mysql-test/suite/engines/funcs/t/rpl_row_trig001.test b/mysql-test/suite/engines/funcs/t/rpl_row_trig001.test
index 7b1fca2d6a1..77fd9fbf90e 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_row_trig001.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_row_trig001.test
@@ -1,100 +1,2 @@
-#############################################################################
-# Original Author: JBM #
-# Original Date: Aug/09/2005 #
-#############################################################################
-# TEST: Use after insert and before insert triggers and stored procedures to #
-# update and insert data #
-#############################################################################
+--source suite/rpl/t/rpl_row_trig001.test
-# Includes
--- source include/have_binlog_format_row.inc
--- source include/master-slave.inc
-
--- disable_query_log
--- disable_result_log
-
-# Begin clean up test section
-connection master;
---disable_warnings
-DROP PROCEDURE IF EXISTS test.p2;
-DROP PROCEDURE IF EXISTS test.p3;
---error 0,1360
-DROP TRIGGER test.t2_ai;
---error 0,1360
-DROP TRIGGER test.t3_bi_t2;
---error 0,1360
-DROP TABLE IF EXISTS test.t1;
-DROP TABLE IF EXISTS test.t2;
-DROP TABLE IF EXISTS test.t3;
-
-
-# test section 1, lets add a trigger to the mix. Taken from bug #12280
-let $message=<Begin test section 1 (Tiggers & SP)>;
---source include/show_msg.inc
-
-CREATE TABLE test.t1 (n MEDIUMINT NOT NULL, d DATETIME, PRIMARY KEY(n));
-CREATE TABLE test.t2 (n MEDIUMINT NOT NULL AUTO_INCREMENT, f FLOAT, d DATETIME, PRIMARY KEY(n));
-CREATE TABLE test.t3 (n MEDIUMINT NOT NULL AUTO_INCREMENT, d DATETIME, PRIMARY KEY(n));
-
-INSERT INTO test.t1 VALUES (1,NOW());
-
-delimiter //;
-CREATE TRIGGER test.t2_ai AFTER INSERT ON test.t2 FOR EACH ROW UPDATE test.t1 SET d=NOW() where n = 1;//
-CREATE PROCEDURE test.p3()
-BEGIN
- INSERT INTO test.t3 (d) VALUES (NOW());
-END//
-CREATE TRIGGER test.t3_bi_t2 BEFORE INSERT ON test.t2 FOR EACH ROW CALL test.p3()//
-CREATE PROCEDURE test.p2()
-BEGIN
- INSERT INTO test.t2 (f,d) VALUES (RAND(),NOW());
-END//
-delimiter ;//
-
--- disable_query_log
--- disable_result_log
-let $1=10;
-while ($1)
-{
- CALL test.p2();
- sleep 1;
- dec $1;
-}
--- enable_result_log
--- enable_query_log
-
-#show binlog events;
-#select * from test.t2;
-#select * from test.t3;
-#connection slave;
-#select * from test.t2;
-#select * from test.t3;
-
-let $message=<End test section 2 (Tiggers & SP)>;
---source include/show_msg.inc
-
-# time to dump the databases and so we can see if they match
-
---exec $MYSQL_DUMP --compact --order-by-primary --skip-extended-insert --no-create-info test > $MYSQLTEST_VARDIR/tmp/trig001_master.sql
---exec $MYSQL_DUMP_SLAVE --compact --order-by-primary --skip-extended-insert --no-create-info test > $MYSQLTEST_VARDIR/tmp/trig001_slave.sql
-
-# Cleanup
-connection master;
-DROP PROCEDURE test.p2;
-DROP PROCEDURE test.p3;
-DROP TRIGGER test.t2_ai;
-DROP TRIGGER test.t3_bi_t2;
-DROP TABLE test.t1;
-DROP TABLE test.t2;
-DROP TABLE test.t3;
-sync_slave_with_master;
-
-# Let's compare. Note: if they match, the test will pass; if they do not match,
-# the test will show that the diff statement failed and no reject file
-# will be created. You will need to go to the mysql-test dir and diff
-# the files yourself to see what is not matching :-) Failed tests
-# will leave dump files in $MYSQLTEST_VARDIR/tmp
-
-diff_files $MYSQLTEST_VARDIR/tmp/trig001_master.sql $MYSQLTEST_VARDIR/tmp/trig001_slave.sql;
-
-# End of 5.0 test case
diff --git a/mysql-test/suite/engines/funcs/t/rpl_row_trig002.test b/mysql-test/suite/engines/funcs/t/rpl_row_trig002.test
index 44b7d8b1dc2..8b7342c32a1 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_row_trig002.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_row_trig002.test
@@ -1,82 +1,2 @@
-#############################################################################
-# Original Author: JBM #
-# Original Date: Aug/14/2005 #
-# Updated: 08/29/2005 added save master pos and sync with master #
-#############################################################################
-# TEST: Taken and modified from BUG#12048 After Insert updates replication #
-#############################################################################
+--source suite/rpl/t/rpl_row_trig002.test
-# Includes
--- source include/have_binlog_format_row.inc
--- source include/master-slave.inc
-
-#-- disable_query_log
-#-- disable_result_log
-
-# Begin clean up test section
-connection master;
---disable_warnings
---error 0,1360
-DROP TRIGGER test.t2_ai;
-DROP TABLE IF EXISTS test.t1;
-DROP TABLE IF EXISTS test.t2;
-DROP TABLE IF EXISTS test.t3;
---enable_warnings
-
-# test section 1, Taken from bug #12408
-
-CREATE TABLE test.t2 (value CHAR(30),domain_id INT, mailaccount_id INT, program CHAR(30),keey CHAR(30),PRIMARY KEY(domain_id));
-
-CREATE TABLE test.t3 (value CHAR(30),domain_id INT, mailaccount_id INT, program CHAR(30),keey CHAR(30),PRIMARY KEY(domain_id));
-
-CREATE TABLE test.t1 (id INT,domain CHAR(30),PRIMARY KEY(id));
-
-delimiter |;
-CREATE TRIGGER test.t2_ai AFTER INSERT ON test.t2 FOR EACH ROW UPDATE test.t3 ms, test.t1 d SET ms.value='No' WHERE ms.domain_id = (SELECT max(id) FROM test.t1 WHERE domain='example.com') AND ms.mailaccount_id IS NULL AND ms.program='spamfilter' AND ms.keey='scan_incoming'|
-delimiter ;|
-
-INSERT INTO test.t1 VALUES (1, 'example.com'),(2, 'mysql.com'),(3, 'earthmotherwear.com'), (4, 'yahoo.com'),(5, 'example.com');
-
-SELECT * FROM test.t1 ORDER BY id;
-#show binlog events;
-save_master_pos;
-connection slave;
-sync_with_master;
-SELECT * FROM test.t1 ORDER BY id;
-connection master;
-
-INSERT INTO test.t3 VALUES ('Yes', 5, NULL, 'spamfilter','scan_incoming');
-INSERT INTO test.t3 VALUES ('Yes', 1, NULL, 'spamfilter','scan_incoming');
-INSERT INTO test.t2 VALUES ('Yes', 1, NULL, 'spamfilter','scan_incoming');
-
-select * from test.t2;
---sorted_result
-select * from test.t3;
-save_master_pos;
-connection slave;
-sync_with_master;
-select * from test.t2;
---sorted_result
-select * from test.t3;
-connection master;
-
-DELETE FROM test.t1 WHERE id = 1;
-
-SELECT * FROM test.t1 ORDER BY id;
-connection master;
-SELECT * FROM test.t1 ORDER BY id;
-save_master_pos;
-connection slave;
-sync_with_master;
-SELECT * FROM test.t1 ORDER BY id;
-
-# Cleanup
-connection master;
-#show binlog events;
-DROP TRIGGER test.t2_ai;
-DROP TABLE test.t1;
-DROP TABLE test.t2;
-DROP TABLE test.t3;
-sync_slave_with_master;
-
-# End of 5.0 test case
diff --git a/mysql-test/suite/engines/funcs/t/rpl_row_trig003.test b/mysql-test/suite/engines/funcs/t/rpl_row_trig003.test
index 4a1bbc5ca89..2bb581995d0 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_row_trig003.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_row_trig003.test
@@ -1,152 +1,2 @@
-#############################################################################
-# Original Author: JBM #
-# Original Date: Aug/16/2005 #
-# Updated: 8/29/2005 Remove sleep calls add dump and diff #
-#############################################################################
-# TEST: This test includes all trigger types. BEFORE/AFTER INSERT, UPDATE & #
-# DELETE. In addition, includes cursor, bit, varchar, flow control, #
-# looping, ROUND(), NOW(), YEAR(), TIMESTAMP #
-#############################################################################
+--source suite/rpl/t/rpl_row_trig003.test
-# Includes
--- source include/have_binlog_format_row.inc
--- source include/master-slave.inc
-
-#-- disable_query_log
-#-- disable_result_log
-
-# Begin clean up test section
-connection master;
---disable_warnings
---error 0,1360
-DROP TRIGGER test.t1_bi;
---error 0,1360
-DROP TRIGGER test.t2_ai;
---error 0,1360
-DROP TRIGGER test.t1_bu;
---error 0,1360
-DROP TRIGGER test.t2_au;
---error 0,1360
-DROP TRIGGER test.t1_bd;
---error 0,1360
-DROP TRIGGER test.t2_ad;
-DROP TABLE IF EXISTS test.t1;
-DROP TABLE IF EXISTS test.t2;
-DROP TABLE IF EXISTS test.t3;
---enable_warnings
-
-# test section 1
-
-CREATE TABLE test.t1 (id MEDIUMINT NOT NULL AUTO_INCREMENT, b1 BIT(8), vc VARCHAR(255), bc CHAR(255), d DECIMAL(10,4) DEFAULT 0, f FLOAT DEFAULT 0, total BIGINT UNSIGNED, y YEAR, t TIMESTAMP,PRIMARY KEY(id));
-CREATE TABLE test.t2 (id MEDIUMINT NOT NULL AUTO_INCREMENT, b1 BIT(8), vc VARCHAR(255), bc CHAR(255), d DECIMAL(10,4) DEFAULT 0, f FLOAT DEFAULT 0, total BIGINT UNSIGNED, y YEAR, t TIMESTAMP,PRIMARY KEY(id));
-CREATE TABLE test.t3 (id MEDIUMINT NOT NULL AUTO_INCREMENT, b1 BIT(8), vc VARCHAR(255), bc CHAR(255), d DECIMAL(10,4) DEFAULT 0, f FLOAT DEFAULT 0, total BIGINT UNSIGNED, y YEAR, t TIMESTAMP,PRIMARY KEY(id));
-
-# Note Most of these cause the slave to core or do not produce desired results. Currently commenting out the ones not working until they are fixed.
-
-delimiter |;
-CREATE TRIGGER test.t1_bi BEFORE INSERT ON test.t1 FOR EACH ROW UPDATE test.t3 SET b1=1 and y=YEAR(NOW())|
-CREATE TRIGGER test.t2_ai AFTER INSERT ON test.t2 FOR EACH ROW BEGIN
- INSERT INTO test.t3 VALUES(NULL,0,'MySQL Replication team rocks!', 'Dark beer in prague is #1',12345.34,12.51,0,1965,NOW());
- UPDATE test.t3 SET f = ROUND(f);
-END|
-CREATE TRIGGER test.t1_bu BEFORE UPDATE on test.t1 FOR EACH ROW BEGIN
- UPDATE test.t3 SET y = '2000';
- INSERT INTO test.t3 VALUES(NULL,1,'Testing MySQL databases before update ', 'Insert should work',621.43, 0105.21,0,1974,NOW());
-END|
-CREATE TRIGGER test.t2_au AFTER UPDATE on test.t2 FOR EACH ROW BEGIN
- DECLARE done INT DEFAULT 0;
- DECLARE a DECIMAL(10,4);
- DECLARE b FLOAT;
- DECLARE num MEDIUMINT;
- DECLARE cur1 CURSOR FOR SELECT t2.id, t2.d, t2.f FROM test.t2;
- DECLARE CONTINUE HANDLER FOR SQLSTATE '02000' SET done = 1;
-
- OPEN cur1;
-
- REPEAT
- FETCH cur1 INTO num, a, b;
- IF NOT done THEN
- UPDATE test.t3 SET total =(a*b) WHERE ID = num;
- END IF;
- UNTIL done END REPEAT;
- CLOSE cur1;
-END|
-CREATE TRIGGER test.t1_bd BEFORE DELETE on test.t1 FOR EACH ROW BEGIN
- DECLARE done INT DEFAULT 0;
- DECLARE a BIT(8);
- DECLARE b VARCHAR(255);
- DECLARE c CHAR(255);
- DECLARE d DECIMAL(10,4);
- DECLARE e FLOAT;
- DECLARE f BIGINT UNSIGNED;
- DECLARE g YEAR;
- DECLARE h TIMESTAMP;
- DECLARE cur1 CURSOR FOR SELECT t1.b1, t1.vc, t1.bc, t1.d, t1.f, t1.total, t1.y, t1.t FROM test.t1;
- DECLARE CONTINUE HANDLER FOR SQLSTATE '02000' SET done = 1;
-
- OPEN cur1;
-
- REPEAT
- FETCH cur1 INTO a, b, c, d, e, f, g, h;
- IF NOT done THEN
- INSERT INTO test.t3 VALUES(NULL, a, b, c, d, e, f, g, h);
- END IF;
- UNTIL done END REPEAT;
- CLOSE cur1;
-END|
-CREATE TRIGGER test.t2_ad AFTER DELETE ON test.t2 FOR EACH ROW
- DELETE FROM test.t1|
-delimiter ;|
-
-INSERT INTO test.t1 VALUES(NULL,1,'Testing MySQL databases is a cool ', 'Must make it bug free for the customer',654321.4321,15.21,0,1965,NOW());
-INSERT INTO test.t2 VALUES(NULL,0,'Testing MySQL databases is a cool ', 'MySQL Customers ROCK!',654321.4321,1.24521,0,YEAR(NOW()),NOW());
-
-UPDATE test.t1 SET b1 = 0 WHERE b1 = 1;
-
-INSERT INTO test.t2 VALUES(NULL,1,'This is an after update test.', 'If this works, total will not be zero on the master or slave',1.4321,5.221,0,YEAR(NOW()),NOW());
-UPDATE test.t2 SET b1 = 0 WHERE b1 = 1;
-
-INSERT INTO test.t1 VALUES(NULL,1,'add some more test data test.', 'and hope for the best', 3.321,5.221,0,YEAR(NOW()),NOW());
-
-# To make sure BUG#14698 is gone, we sleep 2 seconds before calling trigger
-# (with the bug in, that caused differences in TIMESTAMP columns).
-# We just need to let the machine's clock advance, it's not
-# to do synchronization, so real_sleep is good.
-real_sleep 2;
-
-DELETE FROM test.t1 WHERE id = 1;
-
-DELETE FROM test.t2 WHERE id = 1;
-
-save_master_pos;
-connection slave;
-sync_with_master;
-connection master;
-
-# time to dump the databases and so we can see if they match
-
---exec $MYSQL_DUMP --compact --order-by-primary --skip-extended-insert --no-create-info test > $MYSQLTEST_VARDIR/tmp/trg003_master.sql
---exec $MYSQL_DUMP_SLAVE --compact --order-by-primary --skip-extended-insert --no-create-info test > $MYSQLTEST_VARDIR/tmp/trg003_slave.sql
-
-# cleanup
---disable_warnings
---error 0,1360
-DROP TRIGGER test.t1_bi;
---error 0,1360
-DROP TRIGGER test.t2_ai;
---error 0,1360
-DROP TRIGGER test.t1_bu;
---error 0,1360
-DROP TRIGGER test.t2_au;
---error 0,1360
-DROP TRIGGER test.t1_bd;
---error 0,1360
-DROP TRIGGER test.t2_ad;
-DROP TABLE IF EXISTS test.t1;
-DROP TABLE IF EXISTS test.t2;
-DROP TABLE IF EXISTS test.t3;
---enable_warnings
-
-diff_files $MYSQLTEST_VARDIR/tmp/trg003_master.sql $MYSQLTEST_VARDIR/tmp/trg003_slave.sql;
-
-# End of 5.0 test case
diff --git a/mysql-test/suite/engines/funcs/t/rpl_row_until.test b/mysql-test/suite/engines/funcs/t/rpl_row_until.test
index 7f8b0000042..d919bf7773b 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_row_until.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_row_until.test
@@ -1,126 +1,2 @@
--- source include/have_binlog_format_row.inc
--- source include/master-slave.inc
+--source suite/rpl/t/rpl_row_until.test
-# Note: The test is dependent on binlog positions
-
-# Create some events on master
-connection master;
-CREATE TABLE t1(n INT NOT NULL AUTO_INCREMENT PRIMARY KEY);
-INSERT INTO t1 VALUES (1),(2),(3),(4);
-DROP TABLE t1;
-# Save master log position for query DROP TABLE t1
-save_master_pos;
-let $master_pos_drop_t1= query_get_value(SHOW BINLOG EVENTS, Pos, 11);
-let $master_log_file= query_get_value(SHOW BINLOG EVENTS, Log_name, 11);
-
-CREATE TABLE t2(n INT NOT NULL AUTO_INCREMENT PRIMARY KEY);
-# Save master log position for query CREATE TABLE t2
-save_master_pos;
-let $master_pos_create_t2= query_get_value(SHOW BINLOG EVENTS, Pos, 12);
-
-INSERT INTO t2 VALUES (1),(2);
-save_master_pos;
-# Save master log position for query INSERT INTO t2 VALUES (1),(2);
-let $master_pos_insert1_t2= query_get_value(SHOW BINLOG EVENTS, End_log_pos, 19);
-sync_slave_with_master;
-
-# Save relay log position for query INSERT INTO t2 VALUES (1),(2);
-let $relay_pos_insert1_t2= query_get_value(show slave status, Relay_Log_Pos, 1);
-
-connection master;
-INSERT INTO t2 VALUES (3),(4);
-DROP TABLE t2;
-# Save master log position for query DROP TABLE t2;
-let $master_pos_drop_t2= query_get_value(SHOW BINLOG EVENTS, End_log_pos, 26);
-sync_slave_with_master;
-
---source include/stop_slave.inc
-# Reset slave.
-RESET SLAVE;
---disable_query_log
-eval CHANGE MASTER TO MASTER_USER='root', MASTER_CONNECT_RETRY=1, MASTER_HOST='127.0.0.1', MASTER_PORT=$MASTER_MYPORT;
---enable_query_log
-
-# Try to replicate all queries until drop of t1
-connection slave;
-echo START SLAVE UNTIL MASTER_LOG_FILE='$master_log_file', MASTER_LOG_POS=master_pos_drop_t1;
---disable_query_log
-eval START SLAVE UNTIL MASTER_LOG_FILE='$master_log_file', MASTER_LOG_POS=$master_pos_drop_t1;
---enable_query_log
---source include/wait_for_slave_sql_to_stop.inc
-
-# Here table should be still not deleted
-SELECT * FROM t1;
---let $slave_param= Exec_Master_Log_Pos
---let $slave_param_value= $master_pos_drop_t1
---source include/check_slave_param.inc
-
-# This should fail right after start
---replace_result 291 MASTER_LOG_POS
-START SLAVE UNTIL MASTER_LOG_FILE='master-no-such-bin.000001', MASTER_LOG_POS=291;
---source include/wait_for_slave_sql_to_stop.inc
-# again this table should be still not deleted
-SELECT * FROM t1;
-
---let $slave_param= Exec_Master_Log_Pos
---let $slave_param_value= $master_pos_drop_t1
---source include/check_slave_param.inc
-
-# Try replicate all up to and not including the second insert to t2;
-echo START SLAVE UNTIL RELAY_LOG_FILE='slave-relay-bin.000002', RELAY_LOG_POS=relay_pos_insert1_t2;
---disable_query_log
-eval START SLAVE UNTIL RELAY_LOG_FILE='slave-relay-bin.000002', RELAY_LOG_POS=$relay_pos_insert1_t2;
---enable_query_log
---source include/wait_for_slave_sql_to_stop.inc
-SELECT * FROM t2;
-
---let $slave_param= Exec_Master_Log_Pos
---let $slave_param_value= $master_pos_insert1_t2
---source include/check_slave_param.inc
-
-# clean up
-START SLAVE;
---source include/wait_for_slave_to_start.inc
-connection master;
-sync_slave_with_master;
---source include/stop_slave.inc
-
-# This should stop immediately as we are already there
-echo START SLAVE SQL_THREAD UNTIL MASTER_LOG_FILE='$master_log_file', MASTER_LOG_POS=master_pos_create_t2;
---disable_query_log
-eval START SLAVE SQL_THREAD UNTIL MASTER_LOG_FILE='$master_log_file', MASTER_LOG_POS=$master_pos_create_t2;
---enable_query_log
-let $slave_param= Until_Log_Pos;
-let $slave_param_value= $master_pos_create_t2;
---source include/wait_for_slave_param.inc
---source include/wait_for_slave_sql_to_stop.inc
-# here the sql slave thread should be stopped
---let $slave_param= Exec_Master_Log_Pos
---let $slave_param_value= $master_pos_drop_t2
---source include/check_slave_param.inc
-
-#testing various error conditions
---replace_result 561 MASTER_LOG_POS
---error 1277
-START SLAVE UNTIL MASTER_LOG_FILE='master-bin', MASTER_LOG_POS=561;
---replace_result 561 MASTER_LOG_POS 12 RELAY_LOG_POS
---error 1277
-START SLAVE UNTIL MASTER_LOG_FILE='master-bin.000001', MASTER_LOG_POS=561, RELAY_LOG_POS=12;
---error 1277
-START SLAVE UNTIL MASTER_LOG_FILE='master-bin.000001';
---error 1277
-START SLAVE UNTIL RELAY_LOG_FILE='slave-relay-bin.000009';
---replace_result 561 MASTER_LOG_POS
---error 1277
-START SLAVE UNTIL RELAY_LOG_FILE='slave-relay-bin.000002', MASTER_LOG_POS=561;
-# Warning should be given for second command
-START SLAVE;
---replace_result 740 MASTER_LOG_POS
-START SLAVE UNTIL MASTER_LOG_FILE='master-bin.000001', MASTER_LOG_POS=740;
-
---source include/stop_slave.inc
-# Clear slave IO error.
-RESET SLAVE;
-
---let $rpl_only_running_threads= 1
---source include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/t/rpl_row_view01.test b/mysql-test/suite/engines/funcs/t/rpl_row_view01.test
index 1ccfcb4eb27..6d58666f883 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_row_view01.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_row_view01.test
@@ -1,82 +1,2 @@
-#############################################################################
-# Original Author: JBM #
-# Original Date: Aug/19/2005 #
-# Updated: 08/29/2005 Remove sleeps #
-#############################################################################
-#TEST: row based replication of views #
-#############################################################################
-# Includes
--- source include/have_binlog_format_row.inc
--- source include/master-slave.inc
-
-# Begin clean up test section
-connection master;
---disable_warnings
-create database if not exists mysqltest1;
-DROP VIEW IF EXISTS mysqltest1.v1;
-DROP VIEW IF EXISTS mysqltest1.v2;
-DROP VIEW IF EXISTS mysqltest1.v3;
-DROP VIEW IF EXISTS mysqltest1.v4;
-DROP TABLE IF EXISTS mysqltest1.t3;
-DROP TABLE IF EXISTS mysqltest1.t1;
-DROP TABLE IF EXISTS mysqltest1.t2;
-DROP TABLE IF EXISTS mysqltest1.t4;
-
-# Begin test section 1
-CREATE TABLE mysqltest1.t1 (a INT, c CHAR(6),PRIMARY KEY(a));
-CREATE TABLE mysqltest1.t2 (a INT, c CHAR(6),PRIMARY KEY(a));
-CREATE TABLE mysqltest1.t3 (a INT, c CHAR(6), c2 CHAR(6), PRIMARY KEY(a));
-CREATE TABLE mysqltest1.t4 (a INT, qty INT, price INT,PRIMARY KEY(a));
-CREATE TABLE mysqltest1.t5 (qty INT, price INT, total INT, PRIMARY KEY(qty));
-INSERT INTO mysqltest1.t1 VALUES (1,'Thank'),(2,'it'),(3,'Friday');
-INSERT INTO mysqltest1.t2 VALUES (1,'GOD'),(2,'is'),(3,'TGIF');
-INSERT INTO mysqltest1.t4 VALUES(1, 3, 50),(2, 18, 3),(4, 4, 4);
-
-
-CREATE VIEW mysqltest1.v2 AS SELECT qty, price, qty*price AS value FROM mysqltest1.t4 ORDER BY qty;
-CREATE VIEW mysqltest1.v1 AS SELECT t1.a, t1.c, t2.c as c2 FROM mysqltest1.t1 as t1, mysqltest1.t2 AS t2 WHERE mysqltest1.t1.a = mysqltest1.t2.a ORDER BY a;
-CREATE VIEW mysqltest1.v3 AS SELECT * FROM mysqltest1.t1;
-CREATE VIEW mysqltest1.v4 AS SELECT * FROM mysqltest1.v3 WHERE a > 1 WITH LOCAL CHECK OPTION;
-
-
-SELECT * FROM mysqltest1.v2;
-SELECT * FROM mysqltest1.v1;
-sync_slave_with_master;
-SELECT * FROM mysqltest1.v2;
-SELECT * FROM mysqltest1.v1;
-connection master;
-
-INSERT INTO mysqltest1.t5 SELECT * FROM mysqltest1.v2;
-INSERT INTO mysqltest1.t3 SELECT * FROM mysqltest1.v1;
-
-SELECT * FROM mysqltest1.t5 ORDER BY qty;
-SELECT * FROM mysqltest1.t3 ORDER BY a;
-sync_slave_with_master;
-SELECT * FROM mysqltest1.t5 ORDER BY qty;
-SELECT * FROM mysqltest1.t3 ORDER BY a;
-connection master;
-
-INSERT INTO mysqltest1.v4 VALUES (4,'TEST');
-
-SELECT * FROM mysqltest1.t1 ORDER BY a;
-SELECT * FROM mysqltest1.v4 ORDER BY a;
-sync_slave_with_master;
-SELECT * FROM mysqltest1.t1 ORDER BY a;
-SELECT * FROM mysqltest1.v4 ORDER BY a;
-
-connection master;
-
-# lets cleanup
-DROP VIEW IF EXISTS mysqltest1.v1;
-DROP VIEW IF EXISTS mysqltest1.v2;
-DROP VIEW IF EXISTS mysqltest1.v3;
-DROP VIEW IF EXISTS mysqltest1.v4;
-DROP TABLE IF EXISTS mysqltest1.t3;
-DROP TABLE IF EXISTS mysqltest1.t1;
-DROP TABLE IF EXISTS mysqltest1.t2;
-DROP TABLE IF EXISTS mysqltest1.t4;
-DROP DATABASE mysqltest1;
-sync_slave_with_master;
-
-# End of 5.1 test case
+--source suite/rpl/t/rpl_row_view01.test
diff --git a/mysql-test/suite/engines/funcs/t/rpl_server_id1.test b/mysql-test/suite/engines/funcs/t/rpl_server_id1.test
index 71310750b60..1412db46d3a 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_server_id1.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_server_id1.test
@@ -4,23 +4,28 @@
# ignored, which has caught our customers), unless
# --replicate-same-server-id.
-source include/master-slave.inc;
+--source include/master-slave.inc
connection slave;
create table t1 (n int);
reset master;
# replicate ourselves
-stop slave;
+--source include/stop_slave.inc
--replace_result $SLAVE_MYPORT SLAVE_PORT
eval change master to master_port=$SLAVE_MYPORT;
---replace_result $SLAVE_MYPORT SLAVE_PORT
---replace_column 16 # 18 #
-show slave status;
+--let $status_items= Slave_IO_Running, Slave_SQL_Running, Last_SQL_Errno, Last_SQL_Error, Exec_Master_Log_Pos
+--source include/show_slave_status.inc
start slave;
insert into t1 values (1);
-# can't MASTER_POS_WAIT(), it does not work in this weird setup
-# (when slave is its own master without --replicate-same-server-id)
-sleep 2; # enough time for the event to be replicated (it should not)
-show status like "slave_running";
-drop table t1;
-# End of 4.1 tests
+--let $slave_param=Last_IO_Errno
+--let $slave_param_value=1593
+--source include/wait_for_slave_param.inc
+
+--let $slave_field_result_replace= / at [0-9]*/ at XXX/
+--let $status_items= Last_IO_Errno, Last_IO_Error
+--source include/show_slave_status.inc
+
+--source include/stop_slave.inc
+reset slave;
+reset master;
+drop table t1;
diff --git a/mysql-test/suite/engines/funcs/t/rpl_server_id2.test b/mysql-test/suite/engines/funcs/t/rpl_server_id2.test
index 0f2eb560d18..8d48746ba5e 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_server_id2.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_server_id2.test
@@ -1,26 +1,2 @@
-# This test checks that a slave DOES execute queries originating
-# from itself, if running with --replicate-same-server-id.
+--source suite/rpl/t/rpl_server_id2.test
-source include/master-slave.inc;
-connection slave;
-create table t1 (n int);
-reset master;
-# replicate ourselves
-stop slave;
---replace_result $SLAVE_MYPORT SLAVE_PORT
-eval change master to master_port=$SLAVE_MYPORT;
---replace_result $SLAVE_MYPORT SLAVE_PORT
---replace_column 18 #
-show slave status;
-start slave;
-insert into t1 values (1);
-save_master_pos;
-sync_with_master;
-select * from t1; # check that indeed 2 were inserted
-# We stop the slave before cleaning up otherwise we'll get
-# 'drop table t1' executed twice, so an error in the slave.err
-# (not critical).
-stop slave;
-drop table t1;
-
-# End of 4.1 tests
diff --git a/mysql-test/suite/engines/funcs/t/rpl_session_var.test b/mysql-test/suite/engines/funcs/t/rpl_session_var.test
index a6f4b496a23..f1686e107b6 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_session_var.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_session_var.test
@@ -1,42 +1,2 @@
-# Replication of session variables.
-# FOREIGN_KEY_CHECKS is tested in rpl_insert_id.test
+--source suite/rpl/t/rpl_session_var.test
-source include/master-slave.inc;
-drop table if exists t1;
-create table t1(a varchar(100),b int);
-set @@session.sql_mode=pipes_as_concat;
-insert into t1 values('My'||'SQL', 1);
-set @@session.sql_mode=default;
-insert into t1 values('1'||'2', 2);
-select * from t1 where b<3 order by a;
-save_master_pos;
-connection slave;
-sync_with_master;
-select * from t1 where b<3 order by a;
-connection master;
-# if the slave does the next sync_with_master fine, then it means it accepts the
-# two lines of ANSI syntax below, which is what we want to check.
-set @@session.sql_mode=ignore_space;
-insert into t1 values(password ('MySQL'), 3);
-set @@session.sql_mode=ansi_quotes;
-create table "t2" ("a" int);
-drop table t1, t2;
-set @@session.sql_mode=default;
-create table t1(a int auto_increment primary key);
-create table t2(b int, a int);
-set @@session.sql_auto_is_null=1;
-insert into t1 values(null);
-insert into t2 select 1,a from t1 where a is null;
-set @@session.sql_auto_is_null=0;
-insert into t1 values(null);
-insert into t2 select 2,a from t1 where a is null;
-select * from t2 order by b;
-save_master_pos;
-connection slave;
-sync_with_master;
-select * from t2 order by b;
-connection master;
-drop table t1,t2;
-save_master_pos;
-connection slave;
-sync_with_master;
diff --git a/mysql-test/suite/engines/funcs/t/rpl_sf.test b/mysql-test/suite/engines/funcs/t/rpl_sf.test
index 6a741d80172..7ce6dc002ae 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_sf.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_sf.test
@@ -1,5 +1,5 @@
# Bug#16456 RBR: rpl_sp.test expects query to fail, but passes in RBR
-source include/master-slave.inc;
+--source include/master-slave.inc
# save status
let $oblf=`select @@SESSION.BINLOG_FORMAT`;
@@ -66,3 +66,4 @@ drop function fn16456;
eval set binlog_format=$oblf;
eval set global log_bin_trust_function_creators=$otfc;
--enable_query_log
+--source include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/t/rpl_skip_error.test b/mysql-test/suite/engines/funcs/t/rpl_skip_error.test
index ff81e2f010e..083dfaa2075 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_skip_error.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_skip_error.test
@@ -2,12 +2,10 @@
# 2006-02-07 By JBM: Added order by
#########################################
# Note that errors are ignored by opt file.
-source include/master-slave.inc;
+--source include/master-slave.inc
create table t1 (n int not null primary key);
-save_master_pos;
-connection slave;
-sync_with_master;
+--sync_slave_with_master
insert into t1 values (1);
connection master;
# Here we expect (ignored) error, since 1 is already in slave table
@@ -16,14 +14,13 @@ insert into t1 values (1);
# These should work fine
insert into t1 values (2),(3);
-save_master_pos;
-connection slave;
-sync_with_master;
+--sync_slave_with_master
select * from t1 ORDER BY n;
# Cleanup
connection master;
drop table t1;
-sync_slave_with_master;
+--sync_slave_with_master
# End of 4.1 tests
+--source include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/t/rpl_slave_status.test b/mysql-test/suite/engines/funcs/t/rpl_slave_status.test
index b3d6e49e215..677d95117fc 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_slave_status.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_slave_status.test
@@ -1,58 +1,2 @@
---source include/master-slave.inc
+--source suite/rpl/t/rpl_slave_status.test
-############################################################################
-# Test case for BUG#10780
-#
-# REQUIREMENT
-# A slave without replication privileges should have Slave_IO_Running = No
-
-# 1. Create new replication user
-connection master;
-grant replication slave on *.* to rpl@127.0.0.1 identified by 'rpl';
-
-connection slave;
-stop slave;
-change master to master_user='rpl',master_password='rpl';
-start slave;
-
-# 2. Do replication as new user
-connection master;
---disable_warnings
-drop table if exists t1;
---enable_warnings
-create table t1 (n int);
-insert into t1 values (1);
-save_master_pos;
-connection slave;
-sync_with_master;
-select * from t1;
-
-# 3. Delete new replication user
-connection master;
-drop user rpl@127.0.0.1;
-sleep 1;
-flush privileges;
-connection slave;
-
-# 4. Restart slave without privileges
-# (slave.err will contain access denied error for this START SLAVE command)
-stop slave;
-start slave;
-
-# 5. Make sure Slave_IO_Running = No
---replace_result $MASTER_MYPORT MASTER_MYPORT
-# Column 1 is replaced, since the output can be either
-# "Connecting to master" or "Waiting for master update"
---replace_column 1 # 7 # 8 # 9 # 22 # 23 # 35 # 36 #
---vertical_results
-show slave status;
-
-# Cleanup (Note that slave IO thread is not running)
-connection slave;
-drop table t1;
-connection master;
-drop table t1;
-
-# end of test case for BUG#10780
-
-# end of 4.1 tests
diff --git a/mysql-test/suite/engines/funcs/t/rpl_sp.test b/mysql-test/suite/engines/funcs/t/rpl_sp.test
index ad2cc29375b..773062275c9 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_sp.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_sp.test
@@ -1,496 +1,2 @@
-# row-based and statement have expected binlog difference in result files
-
-# Test of replication of stored procedures (WL#2146 for MySQL 5.0)
-# Modified by WL#2971.
-
-source include/have_binlog_format_mixed.inc;
-source include/master-slave.inc;
-
-# we need a db != test, where we don't have automatic grants
---disable_warnings
-drop database if exists mysqltest1;
---enable_warnings
-create database mysqltest1;
-use mysqltest1;
-create table t1 (a varchar(100));
-sync_slave_with_master;
-use mysqltest1;
-
-# ********************** PART 1 : STORED PROCEDURES ***************
-
-# Does the same proc as on master get inserted into mysql.proc ?
-# (same definer, same properties...)
-
-connection master;
-
-delimiter |;
-
-# Stored procedures don't have the limitations that functions have
-# regarding binlogging: it's ok to create a procedure as not
-# deterministic and updating data, while it's not ok to create such a
-# function. We test this.
-
-create procedure foo()
-begin
- declare b int;
- set b = 8;
- insert into t1 values (b);
- insert into t1 values (unix_timestamp());
-end|
-delimiter ;|
-
-# we replace columns having times
-# (even with fixed timestamp displayed time may changed based on TZ)
---replace_result localhost.localdomain localhost 127.0.0.1 localhost
---replace_column 13 # 14 #
-select * from mysql.proc where name='foo' and db='mysqltest1';
-sync_slave_with_master;
-# You will notice in the result that the definer does not match what
-# it is on master, it is a known bug on which Alik is working
---replace_result localhost.localdomain localhost 127.0.0.1 localhost
---replace_column 13 # 14 #
-select * from mysql.proc where name='foo' and db='mysqltest1';
-
-connection master;
-# see if timestamp used in SP on slave is same as on master
-set timestamp=1000000000;
-call foo();
-select * from t1;
-sync_slave_with_master;
-select * from t1;
-
-# Now a SP which is not updating tables
-
-connection master;
-delete from t1;
-create procedure foo2()
- select * from mysqltest1.t1;
-call foo2();
-
-# check that this is allowed (it's not for functions):
-alter procedure foo2 contains sql;
-
-# SP with definer's right
-
-drop table t1;
-create table t1 (a int);
-create table t2 like t1;
-
-create procedure foo3()
- deterministic
- insert into t1 values (15);
-
-# let's create a non-privileged user
-grant CREATE ROUTINE, EXECUTE on mysqltest1.* to "zedjzlcsjhd"@127.0.0.1;
-grant SELECT on mysqltest1.t1 to "zedjzlcsjhd"@127.0.0.1;
-grant SELECT, INSERT on mysqltest1.t2 to "zedjzlcsjhd"@127.0.0.1;
-
-# ToDo: BUG#14931: There is a race between the last grant binlogging, and
-# the binlogging in the new connection made below, causing sporadic test
-# failures due to switched statement order in binlog. To fix this we do
-# SELECT 1 in the first connection before starting the second, ensuring
-# that binlogging is done in the expected order.
-# Please remove this SELECT 1 when BUG#14931 is fixed.
-SELECT 1;
-
-connect (con1,127.0.0.1,zedjzlcsjhd,,mysqltest1,$MASTER_MYPORT,);
-connection con1;
-
-# this routine will fail in the second INSERT because of privileges
-delimiter |;
-create procedure foo4()
- deterministic
- begin
- insert into t2 values(3);
- insert into t1 values (5);
- end|
-
-delimiter ;|
-
-# I add ,0 so that it does not print the error in the test output,
-# because this error is hostname-dependent
---error 1142,0
-call foo4(); # invoker has no INSERT grant on table t1 => failure
-
-connection master;
-call foo3(); # success (definer == root)
-show warnings;
-
---error 1142,0
-call foo4(); # definer's rights => failure
-
-# we test replication of ALTER PROCEDURE
-alter procedure foo4 sql security invoker;
-call foo4(); # invoker's rights => success
-show warnings;
-
-# Note that half-failed procedure calls are ok with binlogging;
-# if we compare t2 on master and slave we see they are identical:
-
-select * from t1;
-select * from t2;
-sync_slave_with_master;
-select * from t1;
-select * from t2;
-
-# Test of DROP PROCEDURE
-
---replace_result localhost.localdomain localhost 127.0.0.1 localhost
---replace_column 13 # 14 #
-select * from mysql.proc where name="foo4" and db='mysqltest1';
-connection master;
-drop procedure foo4;
-select * from mysql.proc where name="foo4" and db='mysqltest1';
-sync_slave_with_master;
-select * from mysql.proc where name="foo4" and db='mysqltest1';
-
-# ********************** PART 2 : FUNCTIONS ***************
-
-connection master;
-drop procedure foo;
-drop procedure foo2;
-drop procedure foo3;
-
-delimiter |;
-# check that needs "deterministic"
---error 1418
-create function fn1(x int)
- returns int
-begin
- insert into t1 values (x);
- return x+2;
-end|
-create function fn1(x int)
- returns int
- deterministic
-begin
- insert into t1 values (x);
- return x+2;
-end|
-
-delimiter ;|
-delete t1,t2 from t1,t2;
-select fn1(20);
-insert into t2 values(fn1(21));
---sorted_result
-select * from t1;
-select * from t2;
-sync_slave_with_master;
---sorted_result
-select * from t1;
-select * from t2;
-
-connection master;
-delimiter |;
-
-drop function fn1;
-
-create function fn1()
- returns int
- no sql
-begin
- return unix_timestamp();
-end|
-
-delimiter ;|
-# check that needs "deterministic"
---error 1418
-alter function fn1 contains sql;
-
-delete from t1;
-set timestamp=1000000000;
-insert into t1 values(fn1());
-
-connection con1;
-
-delimiter |;
---error 1419 # only full-global-privs user can create a function
-create function fn2()
- returns int
- no sql
-begin
- return unix_timestamp();
-end|
-delimiter ;|
-connection master;
-set global log_bin_trust_function_creators=0;
-set global log_bin_trust_function_creators=1;
-# slave needs it too otherwise will not execute what master allowed:
-connection slave;
-set global log_bin_trust_function_creators=1;
-
-connection con1;
-
-delimiter |;
-create function fn2()
- returns int
- no sql
-begin
- return unix_timestamp();
-end|
-delimiter ;|
-
-connection master;
-
-# Now a function which is supposed to not update tables
-# as it's "reads sql data", so should not give error even if
-# non-deterministic.
-
-delimiter |;
-create function fn3()
- returns int
- not deterministic
- reads sql data
-begin
- return 0;
-end|
-delimiter ;|
-
-select fn3();
---replace_result localhost.localdomain localhost 127.0.0.1 localhost
---replace_column 13 # 14 #
-select * from mysql.proc where db='mysqltest1';
-select * from t1;
-
-sync_slave_with_master;
-use mysqltest1;
-select * from t1;
---replace_result localhost.localdomain localhost 127.0.0.1 localhost
---replace_column 13 # 14 #
-select * from mysql.proc where db='mysqltest1';
-
-# ********************** PART 3 : TRIGGERS ***************
-
-connection con1;
-# now fails due to missing trigger grant (err 1142 i/o 1227) due to new
-# check in sql_trigger.cc (v1.44) by anozdrin on 2006/02/01 --azundris
---error ER_TABLEACCESS_DENIED_ERROR
-create trigger trg before insert on t1 for each row set new.a= 10;
-
-connection master;
-delete from t1;
-# TODO: when triggers can contain an update, test that this update
-# does not go into binlog.
-# I'm not setting user vars in the trigger, because replication of user vars
-# would take care of propagating the user var's value to slave, so even if
-# the trigger was not executed on slave it would not be discovered.
-create trigger trg before insert on t1 for each row set new.a= 10;
-insert into t1 values (1);
-select * from t1;
-sync_slave_with_master;
-select * from t1;
-
-connection master;
-delete from t1;
-drop trigger trg;
-insert into t1 values (1);
-select * from t1;
---replace_column 2 # 5 #
---replace_regex /table_id: [0-9]+/table_id: #/
-#show binlog events in 'master-bin.000001' from 106;
-sync_slave_with_master;
-select * from t1;
-
-
-#
-# Test for bug #13969 "Routines which are replicated from master can't be
-# executed on slave".
-#
-connection master;
-create procedure foo()
- not deterministic
- reads sql data
- select * from t1;
-sync_slave_with_master;
-# This should not fail
-call foo();
-connection master;
-drop procedure foo;
-sync_slave_with_master;
-
-
-# Clean up
-connection master;
-drop function fn1;
-drop database mysqltest1;
-drop user "zedjzlcsjhd"@127.0.0.1;
-use test;
-sync_slave_with_master;
-use test;
-
-#
-# Bug#14077 "Failure to replicate a stored function with a cursor":
-# verify that stored routines with cursors work on slave.
-#
-connection master;
---disable_warnings
-drop function if exists f1;
---enable_warnings
-delimiter |;
-create function f1() returns int reads sql data
-begin
- declare var integer;
- declare c cursor for select a from v1;
- open c;
- fetch c into var;
- close c;
- return var;
-end|
-delimiter ;|
-create view v1 as select 1 as a;
-create table t1 (a int);
-insert into t1 (a) values (f1());
-select * from t1;
-drop view v1;
-drop function f1;
-sync_slave_with_master;
-connection slave;
-select * from t1;
-
-#
-# Bug#16621 "INSERTs in Stored Procedures causes data corruption in the Binary
-# Log for 5.0.18"
-#
-
-# Prepare environment.
-
-connection master;
-
---disable_warnings
-DROP PROCEDURE IF EXISTS p1;
-DROP TABLE IF EXISTS t1;
---enable_warnings
-
-# Test case.
-
-CREATE TABLE t1(col VARCHAR(10));
-
-CREATE PROCEDURE p1(arg VARCHAR(10))
- INSERT INTO t1 VALUES(arg);
-
-CALL p1('test');
-
-SELECT * FROM t1;
-
-sync_slave_with_master;
-SELECT * FROM t1;
-
-# Cleanup
-connection master;
-DROP PROCEDURE p1;
-
-
-#
-# BUG#20438: CREATE statements for views, stored routines and triggers can be
-# not replicable.
-#
-
---echo
---echo ---> Test for BUG#20438
-
-# Prepare environment.
-
---echo
---echo ---> Preparing environment...
---echo ---> connection: master
---connection master
-
---disable_warnings
-DROP PROCEDURE IF EXISTS p1;
-DROP FUNCTION IF EXISTS f1;
---enable_warnings
-
---echo
---echo ---> Synchronizing slave with master...
-
---save_master_pos
---connection slave
---sync_with_master
-
---echo
---echo ---> connection: master
---connection master
-
-# Test.
-
---echo
---echo ---> Creating procedure...
-
-/*!50003 CREATE PROCEDURE p1() SET @a = 1 */;
-
-/*!50003 CREATE FUNCTION f1() RETURNS INT RETURN 0 */;
-
---echo
---echo ---> Checking on master...
-
-SHOW CREATE PROCEDURE p1;
-SHOW CREATE FUNCTION f1;
-
---echo
---echo ---> Synchronizing slave with master...
-
---save_master_pos
---connection slave
---sync_with_master
-
---echo ---> connection: master
-
---echo
---echo ---> Checking on slave...
-
-SHOW CREATE PROCEDURE p1;
-SHOW CREATE FUNCTION f1;
-
-# Cleanup.
-
---echo
---echo ---> connection: master
---connection master
-
---echo
---echo ---> Cleaning up...
-
-DROP PROCEDURE p1;
-DROP FUNCTION f1;
-
---save_master_pos
---connection slave
---sync_with_master
---connection master
-
-
-# cleanup
-connection master;
-drop table t1;
-sync_slave_with_master;
-
-# Restore log_bin_trust_function_creators to original value
-set global log_bin_trust_function_creators=0;
-connection master;
-set global log_bin_trust_function_creators=0;
---echo End of 5.0 tests
-
-#
-# Bug22043: MySQL don't add "USE <DATABASE>" before "DROP PROCEDURE IF EXISTS"
-#
-connection master;
-reset master;
---disable_warnings
-drop database if exists mysqltest;
-drop database if exists mysqltest2;
---enable_warnings
-create database mysqltest;
-create database mysqltest2;
-use mysqltest2;
-create table t ( t integer );
-create procedure mysqltest.test() begin end;
-insert into t values ( 1 );
-#show binlog events in 'master-bin.000001' from 106;
---error ER_BAD_DB_ERROR
-create procedure `\\`.test() begin end;
-# Clean up
-drop database mysqltest;
-drop database mysqltest2;
-
---echo End of 5.1 tests
+--source suite/rpl/t/rpl_sp.test
diff --git a/mysql-test/suite/engines/funcs/t/rpl_sp004.test b/mysql-test/suite/engines/funcs/t/rpl_sp004.test
index 967e7007c15..ca5c802b279 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_sp004.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_sp004.test
@@ -1,97 +1,2 @@
-#############################################################################
-# Original Author: JBM #
-# Original Date: Aug/14/2005 #
-#############################################################################
-# Test: This test contains two sp that create and drop tables, insert and #
-# updated data and uses the NOW() function. #
-#############################################################################
+--source suite/rpl/t/rpl_sp004.test
-
-# Includes
--- source include/master-slave.inc
-
-
-# Begin clean up test section
-connection master;
---disable_warnings
-DROP PROCEDURE IF EXISTS test.p1;
-DROP PROCEDURE IF EXISTS test.p2;
-DROP TABLE IF EXISTS test.t2;
-DROP TABLE IF EXISTS test.t1;
-DROP TABLE IF EXISTS test.t3;
---enable_warnings
-# End of cleanup
-
-# Begin test section 1
-
-delimiter |;
-CREATE PROCEDURE test.p1()
-BEGIN
- CREATE TABLE IF NOT EXISTS test.t1(a INT,PRIMARY KEY(a));
- CREATE TABLE IF NOT EXISTS test.t2(a INT,PRIMARY KEY(a));
- INSERT INTO test.t1 VALUES (4),(2),(1),(3);
- UPDATE test.t1 SET a=a+4 WHERE a=4;
- INSERT INTO test.t2 (a) SELECT t1.a FROM test.t1;
- UPDATE test.t1 SET a=a+4 WHERE a=8;
- CREATE TABLE IF NOT EXISTS test.t3(n MEDIUMINT NOT NULL AUTO_INCREMENT, f FLOAT, d DATETIME, PRIMARY KEY(n));
-END|
-CREATE PROCEDURE test.p2()
-BEGIN
- DROP TABLE IF EXISTS test.t1;
- DROP TABLE IF EXISTS test.t2;
- INSERT INTO test.t3 VALUES(NULL,11111111.233333,NOW());
-END|
-delimiter ;|
-
-CALL test.p1();
-SELECT * FROM test.t1 ORDER BY a;
-SELECT * FROM test.t2 ORDER BY a;
-save_master_pos;
-connection slave;
-sync_with_master;
-SELECT * FROM test.t1 ORDER BY a;
-SELECT * FROM test.t2 ORDER BY a;
-
-connection master;
-CALL test.p2();
-USE test;
-SHOW TABLES;
-#SELECT * FROM test.t3;
-save_master_pos;
-connection slave;
-sync_with_master;
-USE test;
-SHOW TABLES;
-#SELECT * FROM test.t3;
-
-connection master;
-CALL test.p1();
-SELECT * FROM test.t1 ORDER BY a;
-SELECT * FROM test.t2 ORDER BY a;
-#SELECT * FROM test.t3;
-save_master_pos;
-connection slave;
-sync_with_master;
-SELECT * FROM test.t1 ORDER BY a;
-SELECT * FROM test.t2 ORDER BY a;
-#SELECT * FROM test.t3;
-
---exec $MYSQL_DUMP --compact --order-by-primary --skip-extended-insert --no-create-info test > $MYSQLTEST_VARDIR/tmp/sp004_master.sql
---exec $MYSQL_DUMP_SLAVE --compact --order-by-primary --skip-extended-insert --no-create-info test > $MYSQLTEST_VARDIR/tmp/sp004_slave.sql
-
-# Cleanup
-connection master;
-#show binlog events;
-DROP PROCEDURE IF EXISTS test.p1;
-DROP PROCEDURE IF EXISTS test.p2;
-DROP TABLE IF EXISTS test.t1;
-DROP TABLE IF EXISTS test.t2;
-DROP TABLE IF EXISTS test.t3;
-sync_slave_with_master;
-
-# If the test fails, you will need to diff the dumps to see why.
-
-diff_files $MYSQLTEST_VARDIR/tmp/sp004_master.sql $MYSQLTEST_VARDIR/tmp/sp004_slave.sql;
-
-
-# End of 5.0 test case
diff --git a/mysql-test/suite/engines/funcs/t/rpl_sp_effects.test b/mysql-test/suite/engines/funcs/t/rpl_sp_effects.test
index 94ce539291d..29b27c728e4 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_sp_effects.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_sp_effects.test
@@ -1,208 +1,2 @@
-##########################################
-# Change Author: JBM
-# Change Date: 2006-05-02
-##########################################
+--source suite/rpl/t/rpl_sp_effects.test
-# Test of replication of stored procedures (WL#2146 for MySQL 5.0)
--- source include/master-slave.inc
-
-# ****************************************************************
-connection master;
-
-# cleanup
---disable_warnings
-drop procedure if exists p1;
-drop procedure if exists p2;
-drop function if exists f1;
-drop table if exists t1,t2;
-drop view if exists v1;
---enable_warnings
-create table t1 (a int);
-
-SET GLOBAL log_bin_trust_function_creators = 1;
-
-# 1. Test simple variables use.
-delimiter //;
-create procedure p1()
-begin
- declare spv int default 0;
- while spv < 5 do
- insert into t1 values(spv+1);
- set spv=spv+1;
- end while;
-end//
-delimiter ;//
-
-call p1();
-
-sync_slave_with_master;
-connection slave;
-SELECT * FROM t1 ORDER BY a;
-connection master;
-SELECT * FROM t1 ORDER BY a;
-
-# 2. Test SP variable name
-delimiter //;
-create procedure p2()
-begin
- declare a int default 4;
- create table t2 as select a;
-end//
-delimiter ;//
-
-call p2();
-SELECT * FROM t2 ORDER BY a;
-sync_slave_with_master;
-connection slave;
-SELECT * FROM t2 ORDER BY a;
-
-connection master;
-drop procedure p1;
-drop procedure p2;
-drop table t2;
-
-# 3. Test FUNCTIONs in various places
-
-delimiter //;
-create function f1(x int) returns int
-begin
- insert into t1 values(x);
- return x+1;
-end//
-
-create procedure p1(a int, b int)
-begin
- declare v int default f1(5);
- if (f1(6)) then
- select 'yes';
- end if;
- set v = f1(7);
- while f1(8) < 1 do
- select 'this cant be';
- end while;
-
-end//
-delimiter ;//
-
-call p1(f1(1), f1(2));
-SELECT * FROM t1 ORDER BY a;
-
-create table t2(a int);
-insert into t2 values (10),(11);
-SELECT a,f1(a) FROM t2 ORDER BY a;
-
-# This shouldn't put separate 'call f1(3)' into binlog:
-insert into t2 select f1(3);
-SELECT 'master:',a FROM t1 ORDER BY a;
-
-sync_slave_with_master;
-connection slave;
-SELECT 'slave:',a FROM t1 ORDER BY a;
-
-connection master;
-drop procedure p1;
-delete from t1;
-delete from t2;
-
-# 4. VIEWs
-delete from t1;
-insert into t2 values(1),(2);
-create view v1 as select f1(a) as f from t2;
-select * from v1 order by f;
-SELECT 'master:',a FROM t1 ORDER BY a;
-
-sync_slave_with_master;
-connection slave;
-SELECT 'slave:',a FROM t1 ORDER BY a;
-
-connection master;
-drop view v1;
-delete from t1;
-
-# 5. Prepared statements.
-prepare s1 from 'select f1(?)';
-set @xx=123;
-execute s1 using @xx;
-SELECT 'master:',a FROM t1 ORDER BY a;
-
-sync_slave_with_master;
-connection slave;
-SELECT 'slave:',a FROM t1 ORDER BY a;
-
-connection master;
-delete from t1;
-
-# 5. Cursors.
-# t2 has (1),(2);
-delimiter //;
-create procedure p1(spv int)
-begin
- declare c cursor for select f1(spv) from t2;
- while (spv > 2) do
- open c;
- fetch c into spv;
- close c;
- set spv= spv - 10;
- end while;
-end//
-delimiter ;//
-call p1(15);
-SELECT 'master:',a FROM t1 ORDER BY a;
-sync_slave_with_master;
-connection slave;
-SELECT 'slave:',a FROM t1 ORDER BY a;
-
-connection master;
-drop procedure p1;
-drop function f1;
-drop table t1,t2;
-
-# BUG#12637: User variables + SPs replication
-create table t1 (a int);
-delimiter //;
-create procedure p1()
-begin
- insert into t1 values(@x);
- set @x=@x+1;
- insert into t1 values(@x);
- if (f2()) then
- insert into t1 values(1243);
- end if;
-end//
-
-create function f2() returns int
-begin
- insert into t1 values(@z);
- set @z=@z+1;
- insert into t1 values(@z);
- return 0;
-end//
-
-create function f1() returns int
-begin
- insert into t1 values(@y);
- call p1();
- return 0;
-end//
-
-delimiter ;//
-
-set @x=10;
-set @y=20;
-set @z=100;
-select f1();
-
-set @x=30;
-call p1();
-
-SELECT 'master', a FROM t1 ORDER BY a;
-sync_slave_with_master;
-connection slave;
-SELECT 'slave', a FROM t1 ORDER BY a;
-
-connection master;
-drop table t1;
-drop function f1;
-drop function f2;
-drop procedure p1;
-sync_slave_with_master;
diff --git a/mysql-test/suite/engines/funcs/t/rpl_start_stop_slave.test b/mysql-test/suite/engines/funcs/t/rpl_start_stop_slave.test
index 19988cf902a..632a1a0232c 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_start_stop_slave.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_start_stop_slave.test
@@ -1,10 +1,10 @@
-source include/master-slave.inc;
+--source include/master-slave.inc
#
# Bug#6148 ()
#
connection slave;
-stop slave;
+--source include/stop_slave.inc
# Let the master do lots of insertions
connection master;
@@ -17,20 +17,19 @@ while ($1)
dec $1;
}
enable_query_log;
-save_master_pos;
connection slave;
-start slave;
-sleep 1;
-stop slave io_thread;
+--source include/start_slave.inc
+--source include/stop_slave_io.inc
start slave io_thread;
-sync_with_master;
+--source include/wait_for_slave_io_to_start.inc
connection master;
-drop table t1;
-save_master_pos;
+--sync_slave_with_master
-connection slave;
-sync_with_master;
+connection master;
+drop table t1;
+--sync_slave_with_master
-# End of 4.1 tests
+# End of test
+--source include/rpl_end.inc
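Another recurring change, visible in the hunks above: bare START SLAVE / STOP SLAVE statements are replaced by the shared include scripts, which issue the statement and then wait until the slave threads actually reach the requested state, so later test steps cannot race against a thread that is still starting or stopping. A minimal sketch of the pattern, assuming the standard two-server topology from master-slave.inc and using only includes that appear in this diff:

connection slave;
# stop both slave threads and wait until they are stopped
--source include/stop_slave.inc

# reconfigure the slave here if needed

# start both threads again and wait until they are running
--source include/start_slave.inc

# after a bare START SLAVE IO_THREAD, wait only for the IO thread
--source include/wait_for_slave_io_to_start.inc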
diff --git a/mysql-test/suite/engines/funcs/t/rpl_stm_mystery22.test b/mysql-test/suite/engines/funcs/t/rpl_stm_mystery22.test
index 017593fdfba..ca1b14201d3 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_stm_mystery22.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_stm_mystery22.test
@@ -20,7 +20,7 @@
# first, cause a duplicate key problem on the slave
create table t1(n int auto_increment primary key, s char(10));
-sync_slave_with_master;
+--sync_slave_with_master
insert into t1 values (2,'old');
connection master;
insert into t1 values(NULL,'new');
@@ -31,13 +31,13 @@ connection slave;
wait_for_slave_to_stop;
select * from t1 order by n;
delete from t1 where n = 2;
---disable_warnings
-start slave;
---enable_warnings
+
+--source include/start_slave.inc
+
sync_with_master;
#now the buggy slave would be confused on the offset but it can replicate
#in order to make it break, we need to stop/start the slave one more time
-stop slave;
+--source include/stop_slave.inc
connection master;
# to be able to really confuse the slave, we need some non-auto-increment
# events in the log
@@ -51,9 +51,9 @@ set sql_log_bin=1;
delete from t1 where n=4;
save_master_pos;
connection slave;
---disable_warnings
-start slave;
---enable_warnings
+
+--source include/start_slave.inc
+
#now the truth comes out - if the slave is buggy, it will never sync because
#the slave thread is not able to read events
sync_with_master;
@@ -61,6 +61,6 @@ select * from t1 order by n;
#clean up
connection master;
drop table t1;
-sync_slave_with_master;
-
+--sync_slave_with_master
+--source include/rpl_end.inc
# End of 4.1 tests
diff --git a/mysql-test/suite/engines/funcs/t/rpl_stm_no_op.test b/mysql-test/suite/engines/funcs/t/rpl_stm_no_op.test
index 66dc89bd712..e1f920b0928 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_stm_no_op.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_stm_no_op.test
@@ -1,93 +1,2 @@
-# It's true only in statement-based replication that a statement which
-# updates no rows (UPDATE/DELETE) is binlogged; in row-based
-# replication, as we log modified rows, nothing is binlogged in this
-# case. So this test is meaningul only in statement-based (and if it was
-# enabled in row-based, it would fail as expected).
+--source suite/rpl/t/rpl_stm_no_op.test
--- source include/have_binlog_format_mixed_or_statement.inc
-
-source include/master-slave.inc;
-
-# see if DROP DATABASE is binlogged even if no effect
-connection slave;
-create database mysqltest;
-connection master;
-drop database if exists mysqltest;
-sync_slave_with_master;
-# can't read dir
---replace_result "Errcode: 1" "Errcode: X" "Errcode: 2" "Errcode: X" \\ /
---error 1049
-show tables from mysqltest;
-
-# see if DROP TABLE is binlogged even if no effect
-connection slave;
-create table t1 (a int);
-connection master;
-drop table if exists t1;
-sync_slave_with_master;
-# table does not exist
---error 1146
-select * from t1;
-
-# see if single-table DELETE is binlogged even if no effect
-connection master;
-create table t1 (a int, b int);
-sync_slave_with_master;
-insert into t1 values(1,1);
-connection master;
-delete from t1;
-sync_slave_with_master;
-select * from t1;
-
-# see if single-table UPDATE is binlogged even if no effect
-insert into t1 values(1,1);
-connection master;
-insert into t1 values(2,1);
-update t1 set a=2;
-sync_slave_with_master;
-select * from t1;
-
-# End of 4.1 tests
-
-# see if multi-table UPDATE is binlogged even if no effect (BUG#13348)
-
-connection master;
-create table t2 (a int, b int);
-delete from t1;
-insert into t1 values(1,1);
-insert into t2 values(1,1);
-
-sync_slave_with_master;
-# force a difference to see if master's multi-UPDATE will correct it
-update t1 set a=2;
-
-connection master;
-UPDATE t1, t2 SET t1.a = t2.a;
-
-sync_slave_with_master;
-select * from t1;
-select * from t2;
-
-# See if multi-table DELETE is binlogged even if no effect
-
-connection master;
-delete from t1;
-delete from t2;
-
-sync_slave_with_master;
-# force a difference to see if master's multi-DELETE will correct it
-insert into t1 values(1,1);
-insert into t2 values(1,1);
-
-connection master;
-DELETE t1.*, t2.* from t1, t2;
-
-sync_slave_with_master;
-select * from t1;
-select * from t2;
-
-
-# cleanup
-connection master;
-drop table t1, t2;
-sync_slave_with_master;
diff --git a/mysql-test/suite/engines/funcs/t/rpl_switch_stm_row_mixed.test b/mysql-test/suite/engines/funcs/t/rpl_switch_stm_row_mixed.test
index 396ba4073e4..2a16d90f9ad 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_switch_stm_row_mixed.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_switch_stm_row_mixed.test
@@ -1,564 +1 @@
--- source include/master-slave.inc
-
-connection default;
-set @saved_binlog_format = @@global.binlog_format;
-
-# Since this test generates row-based events in the binary log, the
-# slave SQL thread cannot be in STATEMENT mode to execute this test,
-# so we only execute it for MIXED and ROW as default value of
-# BINLOG_FORMAT.
-
-connection slave;
--- source include/have_binlog_format_mixed_or_row.inc
-
-connection master;
---disable_warnings
-drop database if exists mysqltest1;
-create database mysqltest1;
---enable_warnings
-use mysqltest1;
-set session binlog_format=row;
-set global binlog_format=row;
-
-show global variables like "binlog_format%";
-show session variables like "binlog_format%";
-select @@global.binlog_format, @@session.binlog_format;
-
-CREATE TABLE t1 (a varchar(100));
-
-prepare stmt1 from 'insert into t1 select concat(UUID(),?)';
-set @string="emergency_1_";
-insert into t1 values("work_2_");
-execute stmt1 using @string;
-deallocate prepare stmt1;
-
-prepare stmt1 from 'insert into t1 select ?';
-insert into t1 values(concat(UUID(),"work_3_"));
-execute stmt1 using @string;
-deallocate prepare stmt1;
-
-insert into t1 values(concat("for_4_",UUID()));
-insert into t1 select "yesterday_5_";
-
-# verify that temp tables prevent a switch to SBR
-create temporary table tmp(a char(100));
-insert into tmp values("see_6_");
---error ER_TEMP_TABLE_PREVENTS_SWITCH_OUT_OF_RBR
-set binlog_format=statement;
-insert into t1 select * from tmp;
-drop temporary table tmp;
-
-# Now we go to SBR
-set binlog_format=statement;
-show global variables like "binlog_format%";
-show session variables like "binlog_format%";
-select @@global.binlog_format, @@session.binlog_format;
-set global binlog_format=statement;
-show global variables like "binlog_format%";
-show session variables like "binlog_format%";
-select @@global.binlog_format, @@session.binlog_format;
-
-prepare stmt1 from 'insert into t1 select ?';
-set @string="emergency_7_";
-insert into t1 values("work_8_");
-execute stmt1 using @string;
-deallocate prepare stmt1;
-
-prepare stmt1 from 'insert into t1 select ?';
-insert into t1 values("work_9_");
-execute stmt1 using @string;
-deallocate prepare stmt1;
-
-insert into t1 values("for_10_");
-insert into t1 select "yesterday_11_";
-
-# test SET DEFAULT (=statement at this point of test)
-set binlog_format=default;
-select @@global.binlog_format, @@session.binlog_format;
-# due to cluster it's hard to set back to default
-#--error ER_NO_DEFAULT
-set global binlog_format=default;
-select @@global.binlog_format, @@session.binlog_format;
-
-prepare stmt1 from 'insert into t1 select ?';
-set @string="emergency_12_";
-insert into t1 values("work_13_");
-execute stmt1 using @string;
-deallocate prepare stmt1;
-
-prepare stmt1 from 'insert into t1 select ?';
-insert into t1 values("work_14_");
-execute stmt1 using @string;
-deallocate prepare stmt1;
-
-insert into t1 values("for_15_");
-insert into t1 select "yesterday_16_";
-
-# and now the mixed mode
-
-set binlog_format=mixed;
-select @@global.binlog_format, @@session.binlog_format;
-set global binlog_format=mixed;
-select @@global.binlog_format, @@session.binlog_format;
-
-prepare stmt1 from 'insert into t1 select concat(UUID(),?)';
-set @string="emergency_17_";
-insert into t1 values("work_18_");
-execute stmt1 using @string;
-deallocate prepare stmt1;
-
-prepare stmt1 from 'insert into t1 select ?';
-insert into t1 values(concat(UUID(),"work_19_"));
-execute stmt1 using @string;
-deallocate prepare stmt1;
-
-insert into t1 values(concat("for_20_",UUID()));
-insert into t1 select "yesterday_21_";
-
-prepare stmt1 from 'insert into t1 select ?';
-insert into t1 values(concat(UUID(),"work_22_"));
-execute stmt1 using @string;
-deallocate prepare stmt1;
-
-insert into t1 values(concat("for_23_",UUID()));
-insert into t1 select "yesterday_24_";
-
-# Test of CREATE TABLE SELECT
-
-create table t2 select rpad(UUID(),100,' ');
-create table t3 select 1 union select UUID();
-create table t4 select * from t1 where 3 in (select 1 union select 2 union select UUID() union select 3);
-create table t5 select * from t1 where 3 in (select 1 union select 2 union select curdate() union select 3);
-# what if UUID() is first:
---disable_warnings
-insert into t5 select UUID() from t1 where 3 in (select 1 union select 2 union select 3 union select * from t4);
---enable_warnings
-
-# inside a stored procedure
-
-delimiter |;
-create procedure foo()
-begin
-insert into t1 values("work_25_");
-insert into t1 values(concat("for_26_",UUID()));
-insert into t1 select "yesterday_27_";
-end|
-create procedure foo2()
-begin
-insert into t1 values(concat("emergency_28_",UUID()));
-insert into t1 values("work_29_");
-insert into t1 values(concat("for_30_",UUID()));
-set session binlog_format=row; # accepted for stored procs
-insert into t1 values("more work_31_");
-set session binlog_format=mixed;
-end|
-create function foo3() returns bigint unsigned
-begin
- set session binlog_format=row; # rejected for stored funcs
- insert into t1 values("alarm");
- return 100;
-end|
-create procedure foo4(x varchar(100))
-begin
-insert into t1 values(concat("work_250_",x));
-insert into t1 select "yesterday_270_";
-end|
-delimiter ;|
-call foo();
-call foo2();
-call foo4("hello");
-call foo4(UUID());
-call foo4("world");
-
-# test that can't SET in a stored function
---error ER_STORED_FUNCTION_PREVENTS_SWITCH_BINLOG_FORMAT
-select foo3();
-select * from t1 where a="alarm";
-
-# Tests of stored functions/triggers/views for BUG#20930 "Mixed
-# binlogging mode does not work with stored functions, triggers,
-# views"
-
-# Function which calls procedure
-drop function foo3;
-delimiter |;
-create function foo3() returns bigint unsigned
-begin
- insert into t1 values("foo3_32_");
- call foo();
- return 100;
-end|
-delimiter ;|
-insert into t2 select foo3();
-
-prepare stmt1 from 'insert into t2 select foo3()';
-execute stmt1;
-execute stmt1;
-deallocate prepare stmt1;
-
-# Test if stored function calls stored function which calls procedure
-# which requires row-based.
-
-delimiter |;
-create function foo4() returns bigint unsigned
-begin
- insert into t2 select foo3();
- return 100;
-end|
-delimiter ;|
-select foo4();
-
-prepare stmt1 from 'select foo4()';
-execute stmt1;
-execute stmt1;
-deallocate prepare stmt1;
-
-# A simple stored function
-delimiter |;
-create function foo5() returns bigint unsigned
-begin
- insert into t2 select UUID();
- return 100;
-end|
-delimiter ;|
-select foo5();
-
-prepare stmt1 from 'select foo5()';
-execute stmt1;
-execute stmt1;
-deallocate prepare stmt1;
-
-# A simple stored function where UUID() is in the argument
-delimiter |;
-create function foo6(x varchar(100)) returns bigint unsigned
-begin
- insert into t2 select x;
- return 100;
-end|
-delimiter ;|
-select foo6("foo6_1_");
-select foo6(concat("foo6_2_",UUID()));
-
-prepare stmt1 from 'select foo6(concat("foo6_3_",UUID()))';
-execute stmt1;
-execute stmt1;
-deallocate prepare stmt1;
-
-
-# Test of views using UUID()
-
-create view v1 as select uuid();
-create table t11 (data varchar(255));
-insert into t11 select * from v1;
-# Test of querying INFORMATION_SCHEMA which parses the view's body,
-# to verify that it binlogs statement-based (is not polluted by
-# the parsing of the view's body).
-insert into t11 select TABLE_NAME from INFORMATION_SCHEMA.TABLES where TABLE_SCHEMA='mysqltest1' and TABLE_NAME IN ('v1','t11');
-prepare stmt1 from "insert into t11 select TABLE_NAME from INFORMATION_SCHEMA.TABLES where TABLE_SCHEMA='mysqltest1' and TABLE_NAME IN ('v1','t11')";
-execute stmt1;
-execute stmt1;
-deallocate prepare stmt1;
-
-# Test of triggers with UUID()
-delimiter |;
-create trigger t11_bi before insert on t11 for each row
-begin
- set NEW.data = concat(NEW.data,UUID());
-end|
-delimiter ;|
-insert into t11 values("try_560_");
-
-# DELAYED option not supported by table created
-# using innodb.
-# Test that INSERT DELAYED works in mixed mode (BUG#20649)
-#insert delayed into t2 values("delay_1_");
-#insert delayed into t2 values(concat("delay_2_",UUID()));
-#insert delayed into t2 values("delay_6_");
-
-# Test for BUG#20633 (INSERT DELAYED RAND()/user_variable does not
-# replicate fine in statement-based ; we test that in mixed mode it
-# works).
-#insert delayed into t2 values(rand());
-#set @a=2.345;
-#insert delayed into t2 values(@a);
-
-sleep 4; # time for the delayed inserts to reach disk
-
-# If you want to do manual testing of the mixed mode regarding UDFs (not
-# testable automatically as quite platform- and compiler-dependent),
-# you just need to set the variable below to 1, and to
-# "make udf_example.so" in sql/, and to copy sql/udf_example.so to
-# MYSQL_TEST_DIR/lib/mysql.
-let $you_want_to_test_UDF=0;
-if ($you_want_to_test_UDF)
-{
- CREATE FUNCTION metaphon RETURNS STRING SONAME 'udf_example.so';
- prepare stmt1 from 'insert into t1 select metaphon(?)';
- set @string="emergency_133_";
- insert into t1 values("work_134_");
- execute stmt1 using @string;
- deallocate prepare stmt1;
- prepare stmt1 from 'insert into t1 select ?';
- insert into t1 values(metaphon("work_135_"));
- execute stmt1 using @string;
- deallocate prepare stmt1;
- insert into t1 values(metaphon("for_136_"));
- insert into t1 select "yesterday_137_";
- create table t6 select metaphon("for_138_");
- create table t7 select 1 union select metaphon("for_139_");
- create table t8 select * from t1 where 3 in (select 1 union select 2 union select metaphon("for_140_") union select 3);
- create table t9 select * from t1 where 3 in (select 1 union select 2 union select curdate() union select 3);
-}
-
-create table t20 select * from t1; # save for comparing later
-create table t21 select * from t2;
-create table t22 select * from t3;
-drop table t1,t2,t3;
-
-# This tests the fix to
-# BUG#19630 stored function inserting into two auto_increment breaks statement-based binlog
-# We verify that under the mixed binlog mode, a stored function
-# modifying at least two tables having an auto_increment column,
-# is binlogged row-based. Indeed in statement-based binlogging,
-# only the auto_increment value generated for the first table
-# is recorded in the binlog, the value generated for the 2nd table
-# lacking.
-
-create table t1 (a int primary key auto_increment, b varchar(100));
-create table t2 (a int primary key auto_increment, b varchar(100));
-create table t3 (b varchar(100));
-delimiter |;
-create function f (x varchar(100)) returns int deterministic
-begin
- insert into t1 values(null,x);
- insert into t2 values(null,x);
- return 1;
-end|
-delimiter ;|
-select f("try_41_");
-# Two operations which compensate each other except that their net
-# effect is that they advance the auto_increment counter of t2 on slave:
-sync_slave_with_master;
-use mysqltest1;
-insert into t2 values(2,null),(3,null),(4,null);
-delete from t2 where a>=2;
-
-connection master;
-# this is the call which didn't replicate well
-select f("try_42_");
-sync_slave_with_master;
-
-# now use prepared statement and test again, just to see that the RBB
-# mode isn't set at PREPARE but at EXECUTE.
-
-insert into t2 values(3,null),(4,null);
-delete from t2 where a>=3;
-
-connection master;
-prepare stmt1 from 'select f(?)';
-set @string="try_43_";
-insert into t1 values(null,"try_44_"); # should be SBB
-execute stmt1 using @string; # should be RBB
-deallocate prepare stmt1;
-sync_slave_with_master;
-
-# verify that if only one table has auto_inc, it does not trigger RBB
-# (we'll check in binlog further below)
-
-connection master;
-create table t12 select * from t1; # save for comparing later
-drop table t1;
-create table t1 (a int, b varchar(100), key(a));
-select f("try_45_");
-
-# restore table's key
-create table t13 select * from t1;
-drop table t1;
-create table t1 (a int primary key auto_increment, b varchar(100));
-
-# now test if it's two functions, each of them inserts in one table
-
-drop function f;
-# we need a unique key to have sorting of rows by mysqldump
-create table t14 (unique (a)) select * from t2;
-truncate table t2;
-delimiter |;
-create function f1 (x varchar(100)) returns int deterministic
-begin
- insert into t1 values(null,x);
- return 1;
-end|
-create function f2 (x varchar(100)) returns int deterministic
-begin
- insert into t2 values(null,x);
- return 1;
-end|
-delimiter ;|
-select f1("try_46_"),f2("try_47_");
-
-sync_slave_with_master;
-insert into t2 values(2,null),(3,null),(4,null);
-delete from t2 where a>=2;
-
-connection master;
-# Test with SELECT and INSERT
-select f1("try_48_"),f2("try_49_");
-insert into t3 values(concat("try_50_",f1("try_51_"),f2("try_52_")));
-sync_slave_with_master;
-
-# verify that if f2 does only read on an auto_inc table, this does not
-# switch to RBB
-connection master;
-drop function f2;
-delimiter |;
-create function f2 (x varchar(100)) returns int deterministic
-begin
- declare y int;
- insert into t1 values(null,x);
- set y = (select count(*) from t2);
- return y;
-end|
-delimiter ;|
-select f1("try_53_"),f2("try_54_");
-sync_slave_with_master;
-
-# And now, a normal statement with a trigger (no stored functions)
-
-connection master;
-drop function f2;
-delimiter |;
-create trigger t1_bi before insert on t1 for each row
-begin
- insert into t2 values(null,"try_55_");
-end|
-delimiter ;|
-insert into t1 values(null,"try_56_");
-# and now remove one auto_increment and verify SBB
-alter table t1 modify a int, drop primary key;
-insert into t1 values(null,"try_57_");
-sync_slave_with_master;
-
-# Test for BUG#20499 "mixed mode with temporary table breaks binlog"
-# Slave used to have only 2 rows instead of 3.
-connection master;
-CREATE TEMPORARY TABLE t15 SELECT UUID();
-create table t16 like t15;
-INSERT INTO t16 SELECT * FROM t15;
-# we'll verify that this one is done RBB
-insert into t16 values("try_65_");
-drop table t15;
-# we'll verify that this one is done SBB
-insert into t16 values("try_66_");
-sync_slave_with_master;
-
-# and now compare:
-
-connection master;
-
-# first check that data on master is sensible
-select count(*) from t1;
-select count(*) from t2;
-select count(*) from t3;
-select count(*) from t4;
-select count(*) from t5;
-select count(*) from t11;
-select count(*) from t20;
-select count(*) from t21;
-select count(*) from t22;
-select count(*) from t12;
-select count(*) from t13;
-select count(*) from t14;
-select count(*) from t16;
-if ($you_want_to_test_UDF)
-{
- select count(*) from t6;
- select count(*) from t7;
- select count(*) from t8;
- select count(*) from t9;
-}
-
-sync_slave_with_master;
-
-#
-# Bug#20863 If binlog format is changed between update and unlock of
-# tables, wrong binlog
-#
-
-connection master;
-DROP TABLE IF EXISTS t11;
-SET SESSION BINLOG_FORMAT=STATEMENT;
-CREATE TABLE t11 (song VARCHAR(255));
-LOCK TABLES t11 WRITE;
-SET SESSION BINLOG_FORMAT=ROW;
-INSERT INTO t11 VALUES('Several Species of Small Furry Animals Gathered Together in a Cave and Grooving With a Pict');
-SET SESSION BINLOG_FORMAT=STATEMENT;
-INSERT INTO t11 VALUES('Careful With That Axe, Eugene');
-UNLOCK TABLES;
-
---query_vertical SELECT * FROM t11
-sync_slave_with_master;
-USE mysqltest1;
---query_vertical SELECT * FROM t11
-
-connection master;
-DROP TABLE IF EXISTS t12;
-SET SESSION BINLOG_FORMAT=MIXED;
-CREATE TABLE t12 (data LONG);
-LOCK TABLES t12 WRITE;
-INSERT INTO t12 VALUES(UUID());
-UNLOCK TABLES;
-
---disable_result_log
---replace_column 2 # 5 #
---replace_regex /table_id: [0-9]+/table_id: #/
-show binlog events;
---enable_result_log
-sync_slave_with_master;
-
-# as we're using UUID we don't SELECT but use "diff" like in rpl_row_UUID
---exec $MYSQL_DUMP --compact --order-by-primary --skip-extended-insert --no-create-info mysqltest1 > $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_master.sql
---exec $MYSQL_DUMP_SLAVE --compact --order-by-primary --skip-extended-insert --no-create-info mysqltest1 > $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_slave.sql
-
-# Let's compare. Note: If they match test will pass, if they do not match
-# the test will show that the diff statement failed and not reject file
-# will be created. You will need to go to the mysql-test dir and diff
-# the files your self to see what is not matching
-
-diff_files $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_master.sql $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_slave.sql;
-
-connection master;
---disable_result_log
---replace_column 2 # 5 #
---replace_regex /table_id: [0-9]+/table_id: #/
-show binlog events;
---enable_result_log
-
-# Now test that mysqlbinlog works fine on a binlog generated by the
-# mixed mode
-
-# BUG#11312 "DELIMITER is not written to the binary log that causes
-# syntax error" makes that mysqlbinlog will fail if we pass it the
-# text of queries; this forces us to use --base64-output here.
-
-# BUG#20929 "BINLOG command causes invalid free plus assertion
-# failure" makes mysqld segfault when receiving --base64-output
-
-# So I can't enable this piece of test
-# SIGH
-
-if ($enable_when_11312_or_20929_fixed)
-{
---exec $MYSQL_BINLOG --base64-output $MYSQLTEST_VARDIR/log/master-bin.000001 > $MYSQLTEST_VARDIR/tmp/mysqlbinlog_mixed.sql
-drop database mysqltest1;
---exec $MYSQL < $MYSQLTEST_VARDIR/tmp/mysqlbinlog_mixed.sql
---exec $MYSQL_DUMP --compact --order-by-primary --skip-extended-insert --no-create-info mysqltest1 > $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_master.sql
-# the old mysqldump output on slave is the same as what it was on
-# master before restoring on master.
-diff_files $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_master.sql $MYSQLTEST_VARDIR/tmp/rpl_switch_stm_row_mixed_slave.sql;
-}
-
-drop database mysqltest1;
-sync_slave_with_master;
-
-connection default;
-set global binlog_format= @saved_binlog_format;
+--source suite/rpl/include/rpl_switch_stm_row_mixed.inc
diff --git a/mysql-test/suite/engines/funcs/t/rpl_temp_table.test b/mysql-test/suite/engines/funcs/t/rpl_temp_table.test
index c13470f20b6..43b5ce49129 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_temp_table.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_temp_table.test
@@ -1,69 +1,2 @@
-# drop table t1 t2 t3 are included int master-slave.inc
-# meaningful only in statement-based:
+--source suite/rpl/t/rpl_temp_table.test
--- source include/have_binlog_format_mixed_or_statement.inc
--- source include/master-slave.inc
-
---disable_query_log
-CALL mtr.add_suppression("Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT");
---enable_query_log
-
-
-create table t2 (n int, PRIMARY KEY(n));
-create temporary table t1 (n int);
-create temporary table t3 (n int not null);
-
-insert into t1 values(1),(2),(3),(100),(25),(26),(200),(300);
---disable_warnings
-insert into t2 select * from t1;
---enable_warnings
-alter table t3 add primary key(n);
-
-flush logs;
-insert into t3 values (1010);
---disable_warnings
-insert into t2 select * from t3;
---enable_warnings
-
-drop table if exists t3;
-insert into t2 values (1012);
-
-connection master1;
-create temporary table t1 (n int);
-insert into t1 values (4),(5);
---disable_warnings
-insert into t2 select * from t1;
---enable_warnings
-
-save_master_pos;
-disconnect master;
-
-connection slave;
-#add 1 to the saved position, so we will catch drop table on disconnect
-#for sure
-sync_with_master 1;
-
-connection master1;
-insert into t2 values(61);
-
-save_master_pos;
-disconnect master1;
-
-connection slave;
-#same trick - make sure we catch drop of temporary table on disconnect
-sync_with_master 1;
-
-select * from t2;
-select count(*) from t2;
-select sum(n) from t2;
-show status like 'Slave_open_temp_tables';
-
-#
-# Clean up
-#
-connect (master2,localhost,root,,);
-connection master2;
-drop table if exists t1,t2;
-save_master_pos;
-connection slave;
-sync_with_master;
diff --git a/mysql-test/suite/engines/funcs/t/rpl_temporary.test b/mysql-test/suite/engines/funcs/t/rpl_temporary.test
index aa90a5153bf..460ea55a675 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_temporary.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_temporary.test
@@ -1,235 +1,2 @@
+--source suite/rpl/t/rpl_temporary.test
--- source include/master-slave.inc
-
-# Clean up old slave's binlogs.
-# The slave is started with --log-slave-updates
-# and this test does SHOW BINLOG EVENTS on the slave's
-# binlog. But previous tests can influence the current test's
-# binlog (e.g. a temporary table in the previous test has not
-# been explicitly deleted, or it has but the slave hasn't had
-# enough time to catch it before STOP SLAVE,
-# and at the beginning of the current
-# test the slave immediately writes DROP TEMPORARY TABLE this_old_table).
-# We wait for the slave to have written all he wants to the binlog
-# (otherwise RESET MASTER may come too early).
-save_master_pos;
-connection slave;
-sync_with_master;
-reset master;
-connection master;
-
---disable_query_log
-CALL mtr.add_suppression("Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT");
---enable_query_log
-
-connect (con1,localhost,root,,);
-
-#added on 2007/5/18
-connection con1;
-CREATE USER ''@localhost;
-
-connect (con2,localhost,root,,);
-# We want to connect as an unprivileged user. But if we use user="" then this
-# will pick the Unix login, which will cause problems if you're running the test
-# as root.
-connect (con3,localhost,zedjzlcsjhd,,);
-
-# We are going to use SET PSEUDO_THREAD_ID in this test;
-# check that it requires the SUPER privilege.
-
-connection con3;
-SET @save_select_limit=@@session.sql_select_limit;
---error 1227
-SET @@session.sql_select_limit=10, @@session.pseudo_thread_id=100;
-SELECT @@session.sql_select_limit = @save_select_limit; #shouldn't have changed
-# While we are here we also test that SQL_LOG_BIN can't be set
---error 1227
-SET @@session.sql_select_limit=10, @@session.sql_log_bin=0;
-SELECT @@session.sql_select_limit = @save_select_limit; #shouldn't have changed
-# Now as root, to be sure it works
-connection con2;
-SET @@session.pseudo_thread_id=100;
-SET @@session.pseudo_thread_id=connection_id();
-SET @@session.sql_log_bin=0;
-SET @@session.sql_log_bin=1;
-
-connection con3;
-let $VERSION=`select version()`;
-
---disable_warnings
-drop table if exists t1,t2;
---enable_warnings
-
-create table t1(f int);
-create table t2(f int);
-insert into t1 values (1),(2),(3),(4),(5),(6),(7),(8),(9),(10);
-
-connection con1;
-create temporary table t3(f int);
---disable_warnings
-insert into t3 select * from t1 where f<6;
---enable_warnings
-sleep 1;
-
-connection con2;
-create temporary table t3(f int);
-sleep 1;
-
-connection con1;
---disable_warnings
-insert into t2 select count(*) from t3;
---enable_warnings
-sleep 1;
-
-connection con2;
---disable_warnings
-insert into t3 select * from t1 where f>=4;
---enable_warnings
-sleep 1;
-
-connection con1;
-drop temporary table t3;
-sleep 1;
-
-connection con2;
---disable_warnings
-insert into t2 select count(*) from t3;
---enable_warnings
-drop temporary table t3;
-
-select * from t2 ORDER BY f;
-
-# Commented out 8/30/2005 to make compatable with both sbr and rbr
-#--replace_result $VERSION VERSION
-#--replace_column 2 # 5 #
-#show binlog events;
-
-drop table t1, t2;
-
-use test;
-SET TIMESTAMP=1040323920;
-create table t1(f int);
-SET TIMESTAMP=1040323931;
-create table t2(f int);
-SET TIMESTAMP=1040323938;
-insert into t1 values (1),(2),(3),(4),(5),(6),(7),(8),(9),(10);
-
-SET TIMESTAMP=1040323945;
-SET @@session.pseudo_thread_id=1;
-create temporary table t3(f int);
-SET TIMESTAMP=1040323952;
-SET @@session.pseudo_thread_id=1;
---disable_warnings
-insert into t3 select * from t1 where f<6;
---enable_warnings
-SET TIMESTAMP=1040324145;
-SET @@session.pseudo_thread_id=2;
-create temporary table t3(f int);
-SET TIMESTAMP=1040324186;
-SET @@session.pseudo_thread_id=1;
---disable_warnings
-insert into t2 select count(*) from t3;
---enable_warnings
-SET TIMESTAMP=1040324200;
-SET @@session.pseudo_thread_id=2;
---disable_warnings
-insert into t3 select * from t1 where f>=4;
---enable_warnings
-SET TIMESTAMP=1040324211;
-SET @@session.pseudo_thread_id=1;
-drop temporary table t3;
-SET TIMESTAMP=1040324219;
-SET @@session.pseudo_thread_id=2;
---disable_warnings
-insert into t2 select count(*) from t3;
---enable_warnings
-SET TIMESTAMP=1040324224;
-SET @@session.pseudo_thread_id=2;
-drop temporary table t3;
-
-select * from t2 ORDER BY f;
-drop table t1,t2;
-
-# Create last a temporary table that is not dropped at end to ensure that we
-# don't get any memory leaks for this
-
-create temporary table t3 (f int);
-#sync_with_master;
-
-# The server will now close done
-
-#
-# Bug#17284 erroneous temp table cleanup on slave
-#
-
-connection master;
-create temporary table t4 (f int);
-create table t5 (f int);
-#sync_with_master;
-# find dumper's $id
-select id from information_schema.processlist where command='Binlog Dump' into @id;
-kill @id; # to stimulate reconnection by slave w/o timeout
---disable_warnings
-insert into t5 select * from t4;
---enable_warnings
-save_master_pos;
-
-connection slave;
-sync_with_master;
-select * from t5 /* must be 1 after reconnection */;
-
-connection master;
-drop temporary table t4;
-drop table t5;
-
-#
-# BUG#17263 incorrect generation DROP temp tables
-# Temporary tables of connection are dropped in batches
-# where a batch correspond to pseudo_thread_id
-# value was set up at the moment of temp table creation
-#
-connection con1;
-set @@session.pseudo_thread_id=100;
-create temporary table t101 (id int);
-create temporary table t102 (id int);
-set @@session.pseudo_thread_id=200;
-create temporary table t201 (id int);
-create temporary table `t``201` (id int);
-# emulate internal temp table not to come to binlog
-create temporary table `#sql_not_user_table202` (id int);
-set @@session.pseudo_thread_id=300;
-create temporary table t301 (id int);
-create temporary table t302 (id int);
-create temporary table `#sql_not_user_table303` (id int);
-
-
-#Added on 2007/5/18
-DROP USER ''@localhost;
-
-disconnect con1;
-
-#now do something to show that slave is ok after DROP temp tables
-connection master;
-create table t1(f int);
-insert into t1 values (1);
-
-sync_slave_with_master;
-#connection slave;
-select * from t1 /* must be 1 */;
-
-connection master;
-drop table t1;
-
-#
-#14157: utf8 encoding in binlog without set character_set_client
-#
-exec $MYSQL --character-sets-dir=../sql/share/charsets/ --default-character-set=latin1 test -e "create table t1 (a int); set names latin1; create temporary table äöüÄÖÜ (a int); insert into äöüÄÖÜ values (1); insert into t1 select * from äöüÄÖÜ";
-
-sync_slave_with_master;
-#connection slave;
-select * from t1;
-
-connection master;
-drop table t1;
-
-# End of 5.1 tests
diff --git a/mysql-test/suite/engines/funcs/t/rpl_trigger.test b/mysql-test/suite/engines/funcs/t/rpl_trigger.test
index 77cc7a3b2c0..4f61e738200 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_trigger.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_trigger.test
@@ -5,16 +5,7 @@
--source include/have_binlog_format_mixed_or_statement.inc
--source include/master-slave.inc
-disable_query_log;
call mtr.add_suppression("Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT");
-enable_query_log;
-
---disable_warnings
-DROP TABLE IF EXISTS t1;
-DROP TABLE IF EXISTS t2;
-DROP TABLE IF EXISTS t3;
-
---enable_warnings
#
# #12482: Triggers has side effects with auto_increment values
@@ -45,19 +36,16 @@ SET @@RAND_SEED1=658490765, @@RAND_SEED2=635893186;
--disable_warnings
insert into t1 values(1,1,rand()),(NULL,2,rand());
insert into t2 (b) values(last_insert_id());
-insert into t2 values(3,0),(NULL,0);
-insert into t2 values(NULL,0),(500,0);
+insert into t2 values(3,0);
+insert into t2 values(NULL,0);
+insert into t2 values(NULL,0);
+insert into t2 values(500,0);
--enable_warnings
select a,b, truncate(rand_value,4) from t1;
select * from t2;
select a,name, old_a, old_b, truncate(rand_value,4) from t3;
-save_master_pos;
-connection slave;
-sync_with_master;
---disable_query_log
-select "--- On slave --" as "";
---enable_query_log
+--sync_slave_with_master
select a,b, truncate(rand_value,4) from t1;
select * from t2;
select a,name, old_a, old_b, truncate(rand_value,4) from t3;
@@ -109,17 +97,14 @@ let $time=`select a from t1`;
# - dump definers on the slave;
SELECT routine_name, definer
-FROM information_schema.routines;
+FROM information_schema.routines
+WHERE routine_name = 'bug12480';
SELECT trigger_name, definer
-FROM information_schema.triggers;
+FROM information_schema.triggers
+WHERE trigger_name = 't1_first';
-save_master_pos;
-connection slave;
-sync_with_master;
---disable_query_log
-select "--- On slave --" as "";
---enable_query_log
+--sync_slave_with_master
# XXX: Definers of stored procedures and functions are not replicated. WL#2897
# (Complete definer support in the stored routines) addresses this issue. So,
@@ -127,10 +112,12 @@ select "--- On slave --" as "";
# item.
SELECT routine_name, definer
-FROM information_schema.routines;
+FROM information_schema.routines
+WHERE routine_name = 'bug12480';
SELECT trigger_name, definer
-FROM information_schema.triggers;
+FROM information_schema.triggers
+WHERE trigger_name = 't1_first';
select a=b && a=c from t1;
--disable_query_log
@@ -170,9 +157,7 @@ create database other;
use other;
insert into test.t1 values (1);
-save_master_pos;
-connection slave;
-sync_with_master;
+--sync_slave_with_master
connection master;
use test;
@@ -219,7 +204,7 @@ while ($rnd)
dec $i;
}
- sync_slave_with_master;
+ --sync_slave_with_master
#connection slave;
eval select * from t1$rnd;
delimiter |;
@@ -258,7 +243,7 @@ while ($rnd)
dec $i;
}
- sync_slave_with_master;
+ --sync_slave_with_master
#connection slave;
eval SELECT * from t1$rnd /* must be f1 $max_rows ... 1 */;
eval SELECT * from t3$rnd /* must be f3 $max_rows * 100 ... 100 */;
@@ -270,6 +255,8 @@ while ($rnd)
connection master;
eval drop table t1$rnd;
+ --sync_slave_with_master
+ connection master;
dec $rnd;
}
@@ -300,26 +287,42 @@ while ($rnd)
# Stop the slave.
connection slave;
-STOP SLAVE;
+--source include/stop_slave.inc
# Replace master's binlog.
connection master;
+let $MYSQLD_DATADIR= `select @@datadir`;
FLUSH LOGS;
-let $DATADIR = `select @@datadir`;
-remove_file $DATADIR/master-bin.000001;
-copy_file $MYSQL_TEST_DIR/std_data/bug16266.000001 $DATADIR/master-bin.000001;
+
+# Stop master server
+--let $rpl_server_number= 1
+--source include/rpl_stop_server.inc
+
+# Replace binlog
+remove_file $MYSQLD_DATADIR/master-bin.000001;
+copy_file $MYSQL_TEST_DIR/std_data/bug16266.000001 $MYSQLD_DATADIR/master-bin.000001;
+
+--let $rpl_server_number= 1
+--source include/rpl_start_server.inc
+
+let $binlog_version= query_get_value(SHOW BINLOG EVENTS, Info, 1);
+
+
+# Make the slave to replay the new binlog.
+--echo --> Master binlog: $binlog_version
# Make the slave to replay the new binlog.
connection slave;
RESET SLAVE;
-START SLAVE;
+--source include/start_slave.inc
SELECT MASTER_POS_WAIT('master-bin.000001', 513) >= 0;
# Check that the replication succeeded.
SHOW TABLES LIKE 't_';
+--replace_column 6 #
SHOW TRIGGERS;
SELECT * FROM t1;
SELECT * FROM t2;
@@ -338,7 +341,7 @@ DROP TRIGGER trg1;
DROP TABLE t1;
DROP TABLE t2;
-STOP SLAVE;
+--source include/stop_slave.inc
RESET SLAVE;
# The master should be clean.
@@ -352,7 +355,7 @@ RESET MASTER;
# Restart slave.
connection slave;
-START SLAVE;
+--source include/start_slave.inc
#
@@ -367,7 +370,6 @@ START SLAVE;
--echo
--echo ---> Preparing environment...
---echo ---> connection: master
--connection master
--disable_warnings
@@ -378,12 +380,9 @@ DROP TABLE IF EXISTS t2;
--echo
--echo ---> Synchronizing slave with master...
---save_master_pos
---connection slave
---sync_with_master
+--sync_slave_with_master
--echo
---echo ---> connection: master
--connection master
# Test.
@@ -412,11 +411,7 @@ SELECT * FROM t2;
--echo
--echo ---> Synchronizing slave with master...
---save_master_pos
---connection slave
---sync_with_master
-
---echo ---> connection: master
+--sync_slave_with_master
--echo
--echo ---> Checking on slave...
@@ -427,7 +422,6 @@ SELECT * FROM t2;
# Cleanup.
--echo
---echo ---> connection: master
--connection master
--echo
@@ -436,9 +430,7 @@ SELECT * FROM t2;
DROP TABLE t1;
DROP TABLE t2;
---save_master_pos
---connection slave
---sync_with_master
+--sync_slave_with_master
--connection master
#
@@ -469,9 +461,7 @@ insert into t1 values (3, "c");
select * from t1;
-save_master_pos;
-connection slave;
-sync_with_master;
+--sync_slave_with_master
select * from t1;
@@ -481,6 +471,5 @@ drop table if exists t1,t11;
#
# End of tests
#
-save_master_pos;
-connection slave;
-sync_with_master;
+--sync_slave_with_master
+--source include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/t/rpl_trunc_temp.test b/mysql-test/suite/engines/funcs/t/rpl_trunc_temp.test
index 28bcb0c06c3..fa072022824 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_trunc_temp.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_trunc_temp.test
@@ -1,35 +1,2 @@
-# Requires statement logging
--- source include/have_binlog_format_mixed_or_statement.inc
+--source suite/rpl/t/rpl_trunc_temp.test
-source include/master-slave.inc;
-
-#
-# Bug#17137 Running "truncate table" on temporary table
-# leaves the table open on a slave
-#
-
-create temporary table t1 (n int);
-insert into t1 values(1);
-sync_slave_with_master;
-show status like 'Slave_open_temp_tables';
-
-# Perform a delete from temp table
-connection master;
-delete from t1;
-sync_slave_with_master;
-show status like 'Slave_open_temp_tables';
-
-# Perform truncate on temp table
-connection master;
-truncate t1;
-sync_slave_with_master;
-show status like 'Slave_open_temp_tables';
-
-# Disconnect the master, temp table on slave should dissapear
-disconnect master;
---real_sleep 3 # time for DROP to be read by slave
-connection slave;
-show status like 'Slave_open_temp_tables';
-
-
-# End of 4.1 tests
diff --git a/mysql-test/suite/engines/funcs/t/rpl_user_variables.test b/mysql-test/suite/engines/funcs/t/rpl_user_variables.test
index 530cda3d87a..1b78f2b5fb5 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_user_variables.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_user_variables.test
@@ -1,57 +1,2 @@
-###################################
-#
-# Test of replicating user variables
-#
-###################################
+--source suite/rpl/t/rpl_user_variables.test
--- source include/master-slave.inc
-# Disable PS as the log positions differs
---disable_ps_protocol
-
-
-# Clean up old slave's binlogs.
-# The slave is started with --log-slave-updates
-# and this test does SHOW BINLOG EVENTS on the slave's
-# binlog. But previous tests can influence the current test's
-# binlog (e.g. a temporary table in the previous test has not
-# been explicitly deleted, or it has but the slave hasn't had
-# enough time to catch it before STOP SLAVE,
-# and at the beginning of the current
-# test the slave immediately writes DROP TEMPORARY TABLE this_old_table).
-# We wait for the slave to have written all he wants to the binlog
-# (otherwise RESET MASTER may come too early).
-save_master_pos;
-connection slave;
-sync_with_master;
-reset master;
-connection master;
-
-create table t1(n char(30));
-set @i1:=12345678901234, @i2:=-12345678901234, @i3:=0, @i4:=-1;
-set @s1:='This is a test', @r1:=12.5, @r2:=-12.5;
-set @n1:=null;
-set @s2:='', @s3:='abc\'def', @s4:= 'abc\\def', @s5:= 'abc''def';
-insert into t1 values (@i1), (@i2), (@i3), (@i4);
-insert into t1 values (@r1), (@r2);
-insert into t1 values (@s1), (@s2), (@s3), (@s4), (@s5);
-insert into t1 values (@n1);
-insert into t1 values (@n2); # not explicitly set before
-insert into t1 values (@a:=0), (@a:=@a+1), (@a:=@a+1);
-insert into t1 values (@a+(@b:=@a+1));
-set @q:='abc';
-insert t1 values (@q), (@q:=concat(@q, 'n1')), (@q:=concat(@q, 'n2'));
-set @a:=5;
-insert into t1 values (@a),(@a);
-# To flush the pending event, we add the following statement. RBR can
-# concatenate the result of several statements, which SBR cannot.
-select * from t1 where n = '<nonexistant>';
-connection master1; # see if variable is reset in binlog when thread changes
-insert into t1 values (@a),(@a),(@a*5);
-SELECT * FROM t1 ORDER BY n;
-sync_slave_with_master;
-SELECT * FROM t1 ORDER BY n;
-connection master;
-insert into t1 select * FROM (select @var1 union select @var2) AS t2;
-drop table t1;
-sync_slave_with_master;
-stop slave;
diff --git a/mysql-test/suite/engines/funcs/t/rpl_variables.test b/mysql-test/suite/engines/funcs/t/rpl_variables.test
index 031131a3f2b..ca612a5593b 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_variables.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_variables.test
@@ -1,4 +1,4 @@
-source include/master-slave.inc;
+--source include/master-slave.inc
# Init for restoration of variable values
set @my_slave_net_timeout =@@global.slave_net_timeout;
@@ -19,3 +19,4 @@ show variables like 'slave_skip_errors';
# Cleanup
set global slave_net_timeout=@my_slave_net_timeout;
set global sql_slave_skip_counter=@my_sql_slave_skip_counter;
+--source include/rpl_end.inc
diff --git a/mysql-test/suite/engines/funcs/t/rpl_view-slave.opt b/mysql-test/suite/engines/funcs/t/rpl_view-slave.opt
deleted file mode 100644
index 79b3bf6174b..00000000000
--- a/mysql-test/suite/engines/funcs/t/rpl_view-slave.opt
+++ /dev/null
@@ -1 +0,0 @@
---replicate-ignore-table=test.foo
diff --git a/mysql-test/suite/engines/funcs/t/rpl_view.test b/mysql-test/suite/engines/funcs/t/rpl_view.test
index 3eff8f7550a..b81b22ece4e 100644
--- a/mysql-test/suite/engines/funcs/t/rpl_view.test
+++ b/mysql-test/suite/engines/funcs/t/rpl_view.test
@@ -1,155 +1,2 @@
-# NYI - row-based cannot use CREATE ... SELECT
+--source suite/rpl/t/rpl_view.test
-source include/master-slave.inc;
---disable_warnings
-drop table if exists t1,v1;
-drop view if exists t1,v1;
-sync_slave_with_master;
-reset master;
---enable_warnings
-
-#
-# Check that creation drop of view is replicated, also check replication of
-# updating of view
-#
-connection master;
-create table t1 (a int);
-insert into t1 values (1);
-create view v1 as select a from t1;
-insert into v1 values (2);
-select * from v1 order by a;
-sync_slave_with_master;
-# view already have to be on slave
-select * from v1 order by a;
-connection master;
-update v1 set a=3 where a=1;
-select * from v1 order by a;
-sync_slave_with_master;
-select * from v1 order by a;
-connection master;
-delete from v1 where a=2;
-select * from v1 order by a;
-sync_slave_with_master;
-select * from v1 order by a;
-connection master;
-# 'alter view' internally maped to creation, but still check that it works
-alter view v1 as select a as b from t1;
-sync_slave_with_master;
-select * from v1 order by 1;
-connection master;
-drop view v1;
-sync_slave_with_master;
-#error, because view have to be removed from slave
--- error 1146
-select * from v1 order by a;
-connection master;
-drop table t1;
-sync_slave_with_master;
-# Change Author: JBM
-# Change Date: 2005-12-22
-# Change: Commented out binlog events to work with SBR and RBR
-#--replace_column 2 # 5 #
-# show binlog events limit 1,100;
-
-#
-# BUG#20438: CREATE statements for views, stored routines and triggers can be
-# not replicable.
-#
-
---echo
---echo ---> Test for BUG#20438
-
-# Prepare environment.
-
---echo
---echo ---> Preparing environment...
---echo ---> connection: master
---connection master
-
---disable_warnings
-DROP TABLE IF EXISTS t1;
-DROP VIEW IF EXISTS v1;
---enable_warnings
-
---echo
---echo ---> Synchronizing slave with master...
-
---save_master_pos
---connection slave
---sync_with_master
-
---echo
---echo ---> connection: master
---connection master
-
-# Test.
-
---echo
---echo ---> Creating objects...
-
-CREATE TABLE t1(c INT);
-
-/*!50003 CREATE VIEW v1 AS SELECT * FROM t1 */;
-
---echo
---echo ---> Inserting value...
-
-INSERT INTO t1 VALUES(1);
-
---echo
---echo ---> Checking on master...
-
-SELECT * FROM t1;
-
---echo
---echo ---> Synchronizing slave with master...
-
---save_master_pos
---connection slave
---sync_with_master
-
---echo ---> connection: master
-
---echo
---echo ---> Checking on slave...
-
-SELECT * FROM t1;
-
-# Cleanup.
-
---echo
---echo ---> connection: master
---connection master
-
---echo
---echo ---> Cleaning up...
-
-DROP VIEW v1;
-DROP TABLE t1;
-
---save_master_pos
---connection slave
---sync_with_master
---connection master
-
-#
-# BUG#19419: "VIEW: View that the column name is different
-# by master and slave is made".
-#
-connection master;
-create table t1(a int, b int);
-insert into t1 values (1, 1), (1, 2), (1, 3);
-create view v1(a, b) as select a, sum(b) from t1 group by a;
-
-sync_slave_with_master;
-explain v1;
-show create table v1;
-select * from v1;
-
-connection master;
-drop table t1;
-drop view v1;
-
-sync_slave_with_master;
-
---echo End of 5.0 tests
diff --git a/mysql-test/suite/engines/iuds/r/insert_decimal.result b/mysql-test/suite/engines/iuds/r/insert_decimal.result
index f167712d048..2f174c5e70b 100644
--- a/mysql-test/suite/engines/iuds/r/insert_decimal.result
+++ b/mysql-test/suite/engines/iuds/r/insert_decimal.result
@@ -110,15 +110,12 @@ Warnings:
Warning 1264 Out of range value for column 'c1' at row 1
Warning 1264 Out of range value for column 'c2' at row 1
Warning 1264 Out of range value for column 'c3' at row 1
-Warning 1366 Incorrect decimal value: '1e+18446744073709551616' for column `test`.`t2`.`c1` at row 2
-Warning 1366 Incorrect decimal value: '1e+18446744073709551616' for column `test`.`t2`.`c2` at row 2
-Warning 1366 Incorrect decimal value: '1e+18446744073709551616' for column `test`.`t2`.`c3` at row 2
+Warning 1264 Out of range value for column 'c1' at row 2
+Warning 1264 Out of range value for column 'c2' at row 2
+Warning 1264 Out of range value for column 'c3' at row 2
Note 1265 Data truncated for column 'c1' at row 3
Note 1265 Data truncated for column 'c2' at row 3
Note 1265 Data truncated for column 'c3' at row 3
-Warning 1366 Incorrect decimal value: '1e-9223372036854775809' for column `test`.`t2`.`c1` at row 4
-Warning 1366 Incorrect decimal value: '1e-9223372036854775809' for column `test`.`t2`.`c2` at row 4
-Warning 1366 Incorrect decimal value: '1e-9223372036854775809' for column `test`.`t2`.`c3` at row 4
SELECT * FROM t1;
c1 c2 c3 c4
0.00000 -0.10000 0 13
@@ -142,7 +139,6 @@ c1 c2 c3 c4
0 0 0 15
0 0 0 26
0 0 0 29
-0 0 0 31
0 0 0 32
0 0 0 33
0 0 0 7
@@ -160,6 +156,7 @@ c1 c2 c3 c4
9999999999 9999999999 9999999999 25
9999999999 9999999999 9999999999 28
9999999999 9999999999 9999999999 30
+9999999999 9999999999 9999999999 31
SELECT count(*) as total_rows, min(c1) as min_value, max(c1) as max_value, sum(c1) as sum, avg(c1) as avg FROM t1;
total_rows min_value max_value sum avg
7 0.00000 99999.99999 212446.04999 30349.435712857
@@ -171,13 +168,13 @@ total_rows min_value max_value sum avg
7 0 111111111 111211212 18535202.0000
SELECT count(*) as total_rows, min(c1) as min_value, max(c1) as max_value, sum(c1) as sum, avg(c1) as avg FROM t2;
total_rows min_value max_value sum avg
-30 -9999999999 9999999999 21322222222 710740740.7333
+30 -9999999999 9999999999 31322222221 1044074074.0333
SELECT count(*) as total_rows, min(c2) as min_value, max(c2) as max_value, sum(c2) as sum, avg(c2) as avg FROM t2;
total_rows min_value max_value sum avg
-30 0 9999999999 33444444445 1114814814.8333
+30 0 9999999999 43444444444 1448148148.1333
SELECT count(*) as total_rows, min(c3) as min_value, max(c3) as max_value, sum(c3) as sum, avg(c3) as avg FROM t2;
total_rows min_value max_value sum avg
-30 -9999999999 9999999999 43322222220 1444074074.0000
+30 -9999999999 9999999999 53322222219 1777407407.3000
SELECT * FROM t1;
c1 c2 c3 c4
0.00000 -0.10000 0 13
diff --git a/mysql-test/suite/federated/assisted_discovery.result b/mysql-test/suite/federated/assisted_discovery.result
index 4818ff7bb02..e8d6663e9bc 100644
--- a/mysql-test/suite/federated/assisted_discovery.result
+++ b/mysql-test/suite/federated/assisted_discovery.result
@@ -13,8 +13,7 @@ CREATE TABLE t1 (
`name` varchar(32) default 'name')
DEFAULT CHARSET=latin1;
connection master;
-CREATE TABLE t1 ENGINE=FEDERATED
-CONNECTION='mysql://root@127.0.0.1:SLAVE_PORT/test/t1';
+CREATE TABLE t1 ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/test/t1';
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
@@ -38,6 +37,9 @@ id group a\\b a\\ name
1 1 2 NULL foo
2 1 2 NULL fee
DROP TABLE t1;
+#
+# MDEV-11311 Create federated table does not work as expected
+#
create table t1 (
a bigint(20) not null auto_increment,
b bigint(20) not null,
@@ -57,8 +59,7 @@ t1 CREATE TABLE `t1` (
KEY `b` (`b`,`c`,`d`(255))
) ENGINE=MyISAM DEFAULT CHARSET=latin1
connection master;
-create table t1 engine=federated
-connection='mysql://root@127.0.0.1:SLAVE_PORT/test/t1';
+create table t1 engine=federated connection='mysql://root@127.0.0.1:$SLAVE_MYPORT/test/t1';
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
@@ -72,6 +73,12 @@ t1 CREATE TABLE `t1` (
drop table t1;
connection slave;
drop table t1;
+#
+# MDEV-17227 Server crash in TABLE_SHARE::init_from_sql_statement_string upon table discovery with non-existent database
+#
+connection master;
+create table t1 engine=federated connection='mysql://root@127.0.0.1:$SLAVE_MYPORT/test/t1';
+ERROR HY000: Unable to connect to foreign data source: Table 'test.t1' doesn't exist
connection master;
DROP TABLE IF EXISTS federated.t1;
DROP DATABASE IF EXISTS federated;
diff --git a/mysql-test/suite/federated/assisted_discovery.test b/mysql-test/suite/federated/assisted_discovery.test
index fa83a2a8e19..bd32878f811 100644
--- a/mysql-test/suite/federated/assisted_discovery.test
+++ b/mysql-test/suite/federated/assisted_discovery.test
@@ -13,9 +13,7 @@ CREATE TABLE t1 (
connection master;
---replace_result $SLAVE_MYPORT SLAVE_PORT
-eval CREATE TABLE t1 ENGINE=FEDERATED
- CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/test/t1';
+evalp CREATE TABLE t1 ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/test/t1';
--replace_result $SLAVE_MYPORT SLAVE_PORT
SHOW CREATE TABLE t1;
@@ -30,9 +28,9 @@ connection slave;
SELECT * FROM t1;
DROP TABLE t1;
-#
-#
-#
+--echo #
+--echo # MDEV-11311 Create federated table does not work as expected
+--echo #
create table t1 (
a bigint(20) not null auto_increment,
b bigint(20) not null,
@@ -44,9 +42,7 @@ create table t1 (
show create table t1;
connection master;
---replace_result $SLAVE_MYPORT SLAVE_PORT
-eval create table t1 engine=federated
- connection='mysql://root@127.0.0.1:$SLAVE_MYPORT/test/t1';
+evalp create table t1 engine=federated connection='mysql://root@127.0.0.1:$SLAVE_MYPORT/test/t1';
--replace_result $SLAVE_MYPORT SLAVE_PORT
show create table t1;
drop table t1;
@@ -54,5 +50,12 @@ drop table t1;
connection slave;
drop table t1;
+--echo #
+--echo # MDEV-17227 Server crash in TABLE_SHARE::init_from_sql_statement_string upon table discovery with non-existent database
+--echo #
+connection master;
+--error ER_CONNECT_TO_FOREIGN_DATA_SOURCE
+evalp create table t1 engine=federated connection='mysql://root@127.0.0.1:$SLAVE_MYPORT/test/t1';
+
source include/federated_cleanup.inc;
diff --git a/mysql-test/suite/federated/federatedx.result b/mysql-test/suite/federated/federatedx.result
index 8345f56dba9..2c21e31afe9 100644
--- a/mysql-test/suite/federated/federatedx.result
+++ b/mysql-test/suite/federated/federatedx.result
@@ -2284,6 +2284,32 @@ connection master;
CREATE TABLE t1 (a INT) ENGINE=FEDERATED CONNECTION='mysql://@127.0.0.1:SLAVE_PORT/federated/t1';
ERROR HY000: Can't create federated table. Foreign data src error: database: 'federated' username: '' hostname: '127.0.0.1'
#
+# MDEV-17573 Assertion in federatedx on multi-update
+#
+create table t1 (
+x int,
+d datetime);
+create table t1f engine=FEDERATED connection='mysql://root@127.0.0.1:MASTER_MYPORT/test/t1';
+create table t2 (
+x int, y int,
+d datetime);
+create table t2f engine=FEDERATED connection='mysql://root@127.0.0.1:MASTER_MYPORT/test/t2';
+create table t3 (
+x int, y int, z int,
+d datetime);
+create table t3f engine=FEDERATED connection='mysql://root@127.0.0.1:MASTER_MYPORT/test/t3';
+insert into t1 values (1, "1990-01-01 00:00");
+insert into t1 values (1, "1991-01-01 11:11");
+insert into t2 values (2, 2, "1992-02-02 22:22");
+insert into t3 values (3, 3, 3, "1993-03-03 23:33");
+update t1f, t2f, t3f set t1f.x= 11, t2f.y= 22, t3f.z= 33;
+drop table t1f;
+drop table t2f;
+drop table t3f;
+drop table t1;
+drop table t2;
+drop table t3;
+#
# MDEV-21049 Segfault in create federatedx table with empty hostname
#
connection master;
diff --git a/mysql-test/suite/federated/federatedx.test b/mysql-test/suite/federated/federatedx.test
index fcc0178c024..51d34298626 100644
--- a/mysql-test/suite/federated/federatedx.test
+++ b/mysql-test/suite/federated/federatedx.test
@@ -2011,6 +2011,35 @@ connection master;
eval CREATE TABLE t1 (a INT) ENGINE=FEDERATED CONNECTION='mysql://@127.0.0.1:$SLAVE_MYPORT/federated/t1';
--echo #
+--echo # MDEV-17573 Assertion in federatedx on multi-update
+--echo #
+create table t1 (
+ x int,
+ d datetime);
+--replace_result $MASTER_MYPORT MASTER_MYPORT
+eval create table t1f engine=FEDERATED connection='mysql://root@127.0.0.1:$MASTER_MYPORT/test/t1';
+
+create table t2 (
+ x int, y int,
+ d datetime);
+--replace_result $MASTER_MYPORT MASTER_MYPORT
+eval create table t2f engine=FEDERATED connection='mysql://root@127.0.0.1:$MASTER_MYPORT/test/t2';
+
+create table t3 (
+ x int, y int, z int,
+ d datetime);
+--replace_result $MASTER_MYPORT MASTER_MYPORT
+eval create table t3f engine=FEDERATED connection='mysql://root@127.0.0.1:$MASTER_MYPORT/test/t3';
+
+insert into t1 values (1, "1990-01-01 00:00");
+insert into t1 values (1, "1991-01-01 11:11");
+insert into t2 values (2, 2, "1992-02-02 22:22");
+insert into t3 values (3, 3, 3, "1993-03-03 23:33");
+update t1f, t2f, t3f set t1f.x= 11, t2f.y= 22, t3f.z= 33;
+
+drop table t1f; drop table t2f; drop table t3f; drop table t1; drop table t2; drop table t3;
+
+--echo #
--echo # MDEV-21049 Segfault in create federatedx table with empty hostname
--echo #
connection master;
diff --git a/mysql-test/suite/funcs_1/r/is_check_constraints.result b/mysql-test/suite/funcs_1/r/is_check_constraints.result
index 37fd191d979..2436be8142f 100644
--- a/mysql-test/suite/funcs_1/r/is_check_constraints.result
+++ b/mysql-test/suite/funcs_1/r/is_check_constraints.result
@@ -90,7 +90,9 @@ CREATE TABLE t3
(
a int,
b int check (b>0), # field constraint named 'b'
-CONSTRAINT b check (b>10) # table constraint
+CONSTRAINT b check (b>10), # table constraint
+# `CHECK_CLAUSE` should allow constraints longer than `varchar(64)`
+CONSTRAINT b1 check (b<123456789012345678901234567890123456789012345678901234567890123456789)
) ENGINE=InnoDB;
SELECT * from information_schema.check_constraints;
CONSTRAINT_CATALOG CONSTRAINT_SCHEMA TABLE_NAME CONSTRAINT_NAME CHECK_CLAUSE
@@ -104,6 +106,7 @@ def foo t2 CHK_dates `start_date` is null
def foo t2 name char_length(`name`) > 2
def foo t3 b `b` > 0
def foo t3 b `b` > 10
+def foo t3 b1 `b` < 123456789012345678901234567890123456789012345678901234567890123456789
disconnect con1;
CONNECT con2, localhost, boo2,, test;
SELECT * from information_schema.check_constraints;
diff --git a/mysql-test/suite/funcs_1/r/is_columns_is.result b/mysql-test/suite/funcs_1/r/is_columns_is.result
index e4797521889..a6e74cb2f05 100644
--- a/mysql-test/suite/funcs_1/r/is_columns_is.result
+++ b/mysql-test/suite/funcs_1/r/is_columns_is.result
@@ -24,7 +24,7 @@ def information_schema CHARACTER_SETS CHARACTER_SET_NAME 1 '' NO varchar 32 96 N
def information_schema CHARACTER_SETS DEFAULT_COLLATE_NAME 2 '' NO varchar 32 96 NULL NULL NULL utf8 utf8_general_ci varchar(32) select NEVER NULL
def information_schema CHARACTER_SETS DESCRIPTION 3 '' NO varchar 60 180 NULL NULL NULL utf8 utf8_general_ci varchar(60) select NEVER NULL
def information_schema CHARACTER_SETS MAXLEN 4 0 NO bigint NULL NULL 19 0 NULL NULL NULL bigint(3) select NEVER NULL
-def information_schema CHECK_CONSTRAINTS CHECK_CLAUSE 5 '' NO varchar 64 192 NULL NULL NULL utf8 utf8_general_ci varchar(64) select NEVER NULL
+def information_schema CHECK_CONSTRAINTS CHECK_CLAUSE 5 '' NO longtext 4294967295 4294967295 NULL NULL NULL utf8 utf8_general_ci longtext select NEVER NULL
def information_schema CHECK_CONSTRAINTS CONSTRAINT_CATALOG 1 '' NO varchar 512 1536 NULL NULL NULL utf8 utf8_general_ci varchar(512) select NEVER NULL
def information_schema CHECK_CONSTRAINTS CONSTRAINT_NAME 4 '' NO varchar 64 192 NULL NULL NULL utf8 utf8_general_ci varchar(64) select NEVER NULL
def information_schema CHECK_CONSTRAINTS CONSTRAINT_SCHEMA 2 '' NO varchar 64 192 NULL NULL NULL utf8 utf8_general_ci varchar(64) select NEVER NULL
@@ -570,7 +570,7 @@ NULL information_schema CHARACTER_SETS MAXLEN bigint NULL NULL NULL NULL bigint(
3.0000 information_schema CHECK_CONSTRAINTS CONSTRAINT_SCHEMA varchar 64 192 utf8 utf8_general_ci varchar(64)
3.0000 information_schema CHECK_CONSTRAINTS TABLE_NAME varchar 64 192 utf8 utf8_general_ci varchar(64)
3.0000 information_schema CHECK_CONSTRAINTS CONSTRAINT_NAME varchar 64 192 utf8 utf8_general_ci varchar(64)
-3.0000 information_schema CHECK_CONSTRAINTS CHECK_CLAUSE varchar 64 192 utf8 utf8_general_ci varchar(64)
+1.0000 information_schema CHECK_CONSTRAINTS CHECK_CLAUSE longtext 4294967295 4294967295 utf8 utf8_general_ci longtext
3.0000 information_schema CLIENT_STATISTICS CLIENT varchar 64 192 utf8 utf8_general_ci varchar(64)
NULL information_schema CLIENT_STATISTICS TOTAL_CONNECTIONS bigint NULL NULL NULL NULL bigint(21)
NULL information_schema CLIENT_STATISTICS CONCURRENT_CONNECTIONS bigint NULL NULL NULL NULL bigint(21)
diff --git a/mysql-test/suite/funcs_1/r/is_columns_is_embedded.result b/mysql-test/suite/funcs_1/r/is_columns_is_embedded.result
index 4fc90b8433d..71765adb3fe 100644
--- a/mysql-test/suite/funcs_1/r/is_columns_is_embedded.result
+++ b/mysql-test/suite/funcs_1/r/is_columns_is_embedded.result
@@ -24,7 +24,7 @@ def information_schema CHARACTER_SETS CHARACTER_SET_NAME 1 '' NO varchar 32 96 N
def information_schema CHARACTER_SETS DEFAULT_COLLATE_NAME 2 '' NO varchar 32 96 NULL NULL NULL utf8 utf8_general_ci varchar(32) NEVER NULL
def information_schema CHARACTER_SETS DESCRIPTION 3 '' NO varchar 60 180 NULL NULL NULL utf8 utf8_general_ci varchar(60) NEVER NULL
def information_schema CHARACTER_SETS MAXLEN 4 0 NO bigint NULL NULL 19 0 NULL NULL NULL bigint(3) NEVER NULL
-def information_schema CHECK_CONSTRAINTS CHECK_CLAUSE 5 '' NO varchar 64 192 NULL NULL NULL utf8 utf8_general_ci varchar(64) NEVER NULL
+def information_schema CHECK_CONSTRAINTS CHECK_CLAUSE 5 '' NO longtext 4294967295 4294967295 NULL NULL NULL utf8 utf8_general_ci longtext NEVER NULL
def information_schema CHECK_CONSTRAINTS CONSTRAINT_CATALOG 1 '' NO varchar 512 1536 NULL NULL NULL utf8 utf8_general_ci varchar(512) NEVER NULL
def information_schema CHECK_CONSTRAINTS CONSTRAINT_NAME 4 '' NO varchar 64 192 NULL NULL NULL utf8 utf8_general_ci varchar(64) NEVER NULL
def information_schema CHECK_CONSTRAINTS CONSTRAINT_SCHEMA 2 '' NO varchar 64 192 NULL NULL NULL utf8 utf8_general_ci varchar(64) NEVER NULL
@@ -570,7 +570,7 @@ NULL information_schema CHARACTER_SETS MAXLEN bigint NULL NULL NULL NULL bigint(
3.0000 information_schema CHECK_CONSTRAINTS CONSTRAINT_SCHEMA varchar 64 192 utf8 utf8_general_ci varchar(64)
3.0000 information_schema CHECK_CONSTRAINTS TABLE_NAME varchar 64 192 utf8 utf8_general_ci varchar(64)
3.0000 information_schema CHECK_CONSTRAINTS CONSTRAINT_NAME varchar 64 192 utf8 utf8_general_ci varchar(64)
-3.0000 information_schema CHECK_CONSTRAINTS CHECK_CLAUSE varchar 64 192 utf8 utf8_general_ci varchar(64)
+1.0000 information_schema CHECK_CONSTRAINTS CHECK_CLAUSE longtext 4294967295 4294967295 utf8 utf8_general_ci longtext
3.0000 information_schema CLIENT_STATISTICS CLIENT varchar 64 192 utf8 utf8_general_ci varchar(64)
NULL information_schema CLIENT_STATISTICS TOTAL_CONNECTIONS bigint NULL NULL NULL NULL bigint(21)
NULL information_schema CLIENT_STATISTICS CONCURRENT_CONNECTIONS bigint NULL NULL NULL NULL bigint(21)
diff --git a/mysql-test/suite/funcs_1/r/is_tables_is.result b/mysql-test/suite/funcs_1/r/is_tables_is.result
index 9af3aa860a0..65dae5a0d6b 100644
--- a/mysql-test/suite/funcs_1/r/is_tables_is.result
+++ b/mysql-test/suite/funcs_1/r/is_tables_is.result
@@ -91,9 +91,9 @@ TABLE_CATALOG def
TABLE_SCHEMA information_schema
TABLE_NAME CHECK_CONSTRAINTS
TABLE_TYPE SYSTEM VIEW
-ENGINE MEMORY
+ENGINE MYISAM_OR_MARIA
VERSION 11
-ROW_FORMAT Fixed
+ROW_FORMAT DYNAMIC_OR_PAGE
TABLE_ROWS #TBLR#
AVG_ROW_LENGTH #ARL#
DATA_LENGTH #DL#
@@ -1157,9 +1157,9 @@ TABLE_CATALOG def
TABLE_SCHEMA information_schema
TABLE_NAME CHECK_CONSTRAINTS
TABLE_TYPE SYSTEM VIEW
-ENGINE MEMORY
+ENGINE MYISAM_OR_MARIA
VERSION 11
-ROW_FORMAT Fixed
+ROW_FORMAT DYNAMIC_OR_PAGE
TABLE_ROWS #TBLR#
AVG_ROW_LENGTH #ARL#
DATA_LENGTH #DL#
diff --git a/mysql-test/suite/funcs_1/r/is_tables_is_embedded.result b/mysql-test/suite/funcs_1/r/is_tables_is_embedded.result
index 9af3aa860a0..65dae5a0d6b 100644
--- a/mysql-test/suite/funcs_1/r/is_tables_is_embedded.result
+++ b/mysql-test/suite/funcs_1/r/is_tables_is_embedded.result
@@ -91,9 +91,9 @@ TABLE_CATALOG def
TABLE_SCHEMA information_schema
TABLE_NAME CHECK_CONSTRAINTS
TABLE_TYPE SYSTEM VIEW
-ENGINE MEMORY
+ENGINE MYISAM_OR_MARIA
VERSION 11
-ROW_FORMAT Fixed
+ROW_FORMAT DYNAMIC_OR_PAGE
TABLE_ROWS #TBLR#
AVG_ROW_LENGTH #ARL#
DATA_LENGTH #DL#
@@ -1157,9 +1157,9 @@ TABLE_CATALOG def
TABLE_SCHEMA information_schema
TABLE_NAME CHECK_CONSTRAINTS
TABLE_TYPE SYSTEM VIEW
-ENGINE MEMORY
+ENGINE MYISAM_OR_MARIA
VERSION 11
-ROW_FORMAT Fixed
+ROW_FORMAT DYNAMIC_OR_PAGE
TABLE_ROWS #TBLR#
AVG_ROW_LENGTH #ARL#
DATA_LENGTH #DL#
diff --git a/mysql-test/suite/funcs_1/t/is_check_constraints.test b/mysql-test/suite/funcs_1/t/is_check_constraints.test
index b539de67f73..dbd286e6239 100644
--- a/mysql-test/suite/funcs_1/t/is_check_constraints.test
+++ b/mysql-test/suite/funcs_1/t/is_check_constraints.test
@@ -69,7 +69,9 @@ CREATE TABLE t3
(
a int,
b int check (b>0), # field constraint named 'b'
-CONSTRAINT b check (b>10) # table constraint
+CONSTRAINT b check (b>10), # table constraint
+# `CHECK_CLAUSE` should allow constraints longer than `varchar(64)`
+CONSTRAINT b1 check (b<123456789012345678901234567890123456789012345678901234567890123456789)
) ENGINE=InnoDB;
--sorted_result
SELECT * from information_schema.check_constraints;
diff --git a/mysql-test/suite/galera/disabled.def b/mysql-test/suite/galera/disabled.def
index 046feac5566..eb90424cf84 100644
--- a/mysql-test/suite/galera/disabled.def
+++ b/mysql-test/suite/galera/disabled.def
@@ -13,35 +13,38 @@
GCF-1081 : MDEV-18283 Galera test failure on galera.GCF-1081
GCF-939 : MDEV-21520 galera.GCF-939
MDEV-20225 : MDEV-20886 galera.MDEV-20225
-MW-286 : MDEV-18464 Killing thread can cause mutex deadlock if done concurrently with Galera/replication victim kill
MW-328A : MDEV-22666 galera.MW-328A MTR failed: "Semaphore wait has lasted > 600 seconds" and do not release port 16002
MW-328B : MDEV-22666 galera.MW-328A MTR failed: "Semaphore wait has lasted > 600 seconds" and do not release port 16002
MW-329 : MDEV-19962 Galera test failure on MW-329
+galera_FK_duplicate_client_insert : MDEV-24473: galera.galera_FK_duplicate_client_insert MTR failed: SIGABRT. InnoDB: Conflicting lock on table. Assertion failure in lock0lock.cc
galera_as_slave_replication_bundle : MDEV-15785 OPTION_GTID_BEGIN is set in Gtid_log_event::do_apply_event()
+galera_bf_abort_at_after_statement : MDEV-21557: galera_bf_abort_at_after_statement MTR failed: query 'reap' succeeded - should have failed with errno 1213
galera_bf_abort_group_commit : MDEV-18282 Galera test failure on galera.galera_bf_abort_group_commit
-galera_binlog_stmt_autoinc : MDEV-19959 Galera test failure on galera_binlog_stmt_autoinc
+galera_bf_lock_wait : MDEV-21597 wsrep::transaction::start_transaction(): Assertion `active() == false' failed
galera_encrypt_tmp_files : Get error failed to enable encryption of temporary files
galera_ftwrl : MDEV-21525 galera.galera_ftwrl
galera_gcache_recover_manytrx : MDEV-18834 Galera test failure
galera_kill_largechanges : MDEV-18179 Galera test failure on galera.galera_kill_largechanges
-galera_kill_nochanges : MDEV-18280 Galera test failure on galera_split_brain and galera_kill_nochanges
galera_many_tables_nopk : MDEV-18182 Galera test failure on galera.galera_many_tables_nopk
galera_mdl_race : MDEV-21524 galera.galera_mdl_race
galera_parallel_simple : MDEV-20318 galera.galera_parallel_simple fails
galera_pc_ignore_sb : MDEV-20888 galera.galera_pc_ignore_sb
+galera_pc_recovery : MDEV-25199 cluster fails to start up
galera_shutdown_nonprim : MDEV-21493 galera.galera_shutdown_nonprim
-galera_split_brain : MDEV-18280 Galera test failure on galera_split_brain and galera_kill_nochanges
galera_ssl_upgrade : MDEV-19950 Galera test failure on galera_ssl_upgrade
-galera_sst_mariabackup_encrypt_with_key : MDEV-21484 galera_sst_mariabackup_encrypt_with_key
galera_toi_ddl_nonconflicting : MDEV-21518 galera.galera_toi_ddl_nonconflicting
galera_toi_truncate : MDEV-22996 Hang on galera_toi_truncate test case
+galera_var_ignore_apply_errors : MDEV-20451: Lock wait timeout exceeded in galera_var_ignore_apply_errors
galera_var_node_address : MDEV-20485 Galera test failure
galera_var_notify_cmd : MDEV-21905 Galera test galera_var_notify_cmd causes hang
galera_var_reject_queries : assertion in inline_mysql_socket_send
galera_var_replicate_myisam_on : MDEV-24062 Galera test failure on galera_var_replicate_myisam_on
galera_var_retry_autocommit: MDEV-18181 Galera test failure on galera.galera_var_retry_autocommit
galera_wan : MDEV-17259 Test failure on galera.galera_wan
+mysql-wsrep#198 : MDEV-24446: galera.mysql-wsrep#198 MTR failed: query 'reap' failed: 2000: Unknown MySQL error
partition : MDEV-19958 Galera test failure on galera.partition
query_cache: MDEV-15805 Test failure on galera.query_cache
sql_log_bin : MDEV-21491 galera.sql_log_bin
-versioning_trx_id : MDEV-18590 galera.versioning_trx_id
+versioning_trx_id: MDEV-18590: galera.versioning_trx_id: Test failure: mysqltest: Result content mismatch
+galera_wsrep_provider_unset_set: wsrep_provider is read-only for security reasons
+pxc-421: wsrep_provider is read-only for security reasons
diff --git a/mysql-test/suite/galera/include/galera_st_clean_slave.inc b/mysql-test/suite/galera/include/galera_st_clean_slave.inc
index 44cbf67fd12..de1842e7ccf 100644
--- a/mysql-test/suite/galera/include/galera_st_clean_slave.inc
+++ b/mysql-test/suite/galera/include/galera_st_clean_slave.inc
@@ -2,24 +2,26 @@
--echo This is accomplished by shutting down node #2 and removing its var directory before restarting it
--connection node_1
-CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB;
+CREATE TABLE t1 (id int not null primary key,f1 CHAR(255)) ENGINE=InnoDB;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
+INSERT INTO t1 VALUES (1,'node1_committed_before');
+INSERT INTO t1 VALUES (2,'node1_committed_before');
+INSERT INTO t1 VALUES (3,'node1_committed_before');
+INSERT INTO t1 VALUES (4,'node1_committed_before');
+INSERT INTO t1 VALUES (5,'node1_committed_before');
COMMIT;
--connection node_2
+--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.INNODB_SYS_TABLES WHERE NAME LIKE 'test/t1';
+--source include/wait_condition.inc
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
+INSERT INTO t1 VALUES (6,'node2_committed_before');
+INSERT INTO t1 VALUES (7,'node2_committed_before');
+INSERT INTO t1 VALUES (8,'node2_committed_before');
+INSERT INTO t1 VALUES (9,'node2_committed_before');
+INSERT INTO t1 VALUES (10,'node2_committed_before');
COMMIT;
--echo Shutting down server ...
@@ -38,28 +40,28 @@ COMMIT;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
+INSERT INTO t1 VALUES (11,'node1_committed_during');
+INSERT INTO t1 VALUES (12,'node1_committed_during');
+INSERT INTO t1 VALUES (13,'node1_committed_during');
+INSERT INTO t1 VALUES (14,'node1_committed_during');
+INSERT INTO t1 VALUES (15,'node1_committed_during');
COMMIT;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (16,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (17,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (18,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (19,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (20,'node1_to_be_committed_after');
--connect node_1a_galera_st_clean_slave, 127.0.0.1, root, , test, $NODE_MYPORT_1
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (21,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (22,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (23,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (24,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (25,'node1_to_be_rollbacked_after');
--connection node_2
--echo Starting server ...
@@ -70,52 +72,56 @@ INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
+INSERT INTO t1 VALUES (26,'node2_committed_after');
+INSERT INTO t1 VALUES (27,'node2_committed_after');
+INSERT INTO t1 VALUES (28,'node2_committed_after');
+INSERT INTO t1 VALUES (29,'node2_committed_after');
+INSERT INTO t1 VALUES (30,'node2_committed_after');
COMMIT;
--connection node_1
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (31,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (32,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (33,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (34,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (35,'node1_to_be_committed_after');
COMMIT;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
+INSERT INTO t1 VALUES (36,'node1_committed_after');
+INSERT INTO t1 VALUES (37,'node1_committed_after');
+INSERT INTO t1 VALUES (38,'node1_committed_after');
+INSERT INTO t1 VALUES (39,'node1_committed_after');
+INSERT INTO t1 VALUES (40,'node1_committed_after');
COMMIT;
--connection node_1a_galera_st_clean_slave
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (41,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (42,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (43,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (44,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (45,'node1_to_be_rollbacked_after');
ROLLBACK;
--let $wait_condition = SELECT COUNT(*)=35 FROM t1
--source include/wait_condition.inc
-SELECT COUNT(*) = 35 FROM t1;
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_35 FROM t1;
+SELECT * from t1;
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
COMMIT;
-SET AUTOCOMMIT=ON;
--connection node_1
--let $wait_condition = SELECT COUNT(*)=35 FROM t1
--source include/wait_condition.inc
-SELECT COUNT(*) = 35 FROM t1;
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_35 FROM t1;
+SELECT * from t1;
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
DROP TABLE t1;
COMMIT;
-SET AUTOCOMMIT=ON;
diff --git a/mysql-test/suite/galera/include/galera_st_disconnect_slave.inc b/mysql-test/suite/galera/include/galera_st_disconnect_slave.inc
index 3ac52deb284..ecbd4bad188 100644
--- a/mysql-test/suite/galera/include/galera_st_disconnect_slave.inc
+++ b/mysql-test/suite/galera/include/galera_st_disconnect_slave.inc
@@ -1,27 +1,27 @@
--echo Performing State Transfer on a server that has been temporarily disconnected
--connection node_1
-CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB;
+CREATE TABLE t1 (id int not null primary key,f1 CHAR(255)) ENGINE=InnoDB;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
+INSERT INTO t1 VALUES (1,'node1_committed_before');
+INSERT INTO t1 VALUES (2,'node1_committed_before');
+INSERT INTO t1 VALUES (3,'node1_committed_before');
+INSERT INTO t1 VALUES (4,'node1_committed_before');
+INSERT INTO t1 VALUES (5,'node1_committed_before');
COMMIT;
--connection node_2
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
+INSERT INTO t1 VALUES (6,'node2_committed_before');
+INSERT INTO t1 VALUES (7,'node2_committed_before');
+INSERT INTO t1 VALUES (8,'node2_committed_before');
+INSERT INTO t1 VALUES (9,'node2_committed_before');
+INSERT INTO t1 VALUES (10,'node2_committed_before');
COMMIT;
---source suite/galera/include/galera_unload_provider.inc
+--source suite/galera/include/galera_stop_replication.inc
--connection node_1
--let $wait_condition = SELECT VARIABLE_VALUE = 1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'
@@ -29,31 +29,31 @@ COMMIT;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
+INSERT INTO t1 VALUES (11,'node1_committed_during');
+INSERT INTO t1 VALUES (12,'node1_committed_during');
+INSERT INTO t1 VALUES (13,'node1_committed_during');
+INSERT INTO t1 VALUES (14,'node1_committed_during');
+INSERT INTO t1 VALUES (15,'node1_committed_during');
COMMIT;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (16,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (17,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (18,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (19,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (20,'node1_to_be_committed_after');
--connect node_1a_galera_st_disconnect_slave, 127.0.0.1, root, , test, $NODE_MYPORT_1
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (21,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (22,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (23,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (24,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (25,'node1_to_be_rollbacked_after');
--connection node_2
---source suite/galera/include/galera_load_provider.inc
+--source suite/galera/include/galera_start_replication.inc
#
# client connections were killed by provider load, so have to re-open here
@@ -68,52 +68,55 @@ INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
+INSERT INTO t1 VALUES (26,'node2_committed_after');
+INSERT INTO t1 VALUES (27,'node2_committed_after');
+INSERT INTO t1 VALUES (28,'node2_committed_after');
+INSERT INTO t1 VALUES (29,'node2_committed_after');
+INSERT INTO t1 VALUES (30,'node2_committed_after');
COMMIT;
--connection node_1
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (31,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (32,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (33,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (34,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (35,'node1_to_be_committed_after');
COMMIT;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
+INSERT INTO t1 VALUES (36,'node1_committed_after');
+INSERT INTO t1 VALUES (37,'node1_committed_after');
+INSERT INTO t1 VALUES (38,'node1_committed_after');
+INSERT INTO t1 VALUES (39,'node1_committed_after');
+INSERT INTO t1 VALUES (40,'node1_committed_after');
COMMIT;
--connection node_1a_galera_st_disconnect_slave
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (41,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (42,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (43,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (44,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (45,'node1_to_be_rollbacked_after');
ROLLBACK;
--let $wait_condition = SELECT COUNT(*)=35 FROM t1
--source include/wait_condition.inc
-SELECT COUNT(*) = 35 FROM t1;
-SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
-COMMIT;
SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_35 FROM t1;
+SELECT * FROM t1;
+SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
--connection node_1
--let $wait_condition = SELECT COUNT(*)=35 FROM t1
--source include/wait_condition.inc
-SELECT COUNT(*) = 35 FROM t1;
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_35 FROM t1;
+SELECT * FROM t1;
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
DROP TABLE t1;
COMMIT;
-SET AUTOCOMMIT=ON;
diff --git a/mysql-test/suite/galera/include/galera_st_kill_slave.inc b/mysql-test/suite/galera/include/galera_st_kill_slave.inc
index a4d9e91e8be..c69dc1d7542 100644
--- a/mysql-test/suite/galera/include/galera_st_kill_slave.inc
+++ b/mysql-test/suite/galera/include/galera_st_kill_slave.inc
@@ -1,24 +1,26 @@
--echo Performing State Transfer on a server that has been killed and restarted
--connection node_1
-CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB;
+CREATE TABLE t1 (id int not null primary key,f1 CHAR(255)) ENGINE=InnoDB;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
+INSERT INTO t1 VALUES (1,'node1_committed_before');
+INSERT INTO t1 VALUES (2,'node1_committed_before');
+INSERT INTO t1 VALUES (3,'node1_committed_before');
+INSERT INTO t1 VALUES (4,'node1_committed_before');
+INSERT INTO t1 VALUES (5,'node1_committed_before');
COMMIT;
--connection node_2
+--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.INNODB_SYS_TABLES WHERE NAME LIKE 'test/t1';
+--source include/wait_condition.inc
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
+INSERT INTO t1 VALUES (6,'node2_committed_before');
+INSERT INTO t1 VALUES (7,'node2_committed_before');
+INSERT INTO t1 VALUES (8,'node2_committed_before');
+INSERT INTO t1 VALUES (9,'node2_committed_before');
+INSERT INTO t1 VALUES (10,'node2_committed_before');
COMMIT;
--source include/kill_galera.inc
@@ -29,28 +31,28 @@ COMMIT;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
+INSERT INTO t1 VALUES (11,'node1_committed_during');
+INSERT INTO t1 VALUES (12,'node1_committed_during');
+INSERT INTO t1 VALUES (13,'node1_committed_during');
+INSERT INTO t1 VALUES (14,'node1_committed_during');
+INSERT INTO t1 VALUES (15,'node1_committed_during');
COMMIT;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (16,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (17,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (18,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (19,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (20,'node1_to_be_committed_after');
--connect node_1a_galera_st_kill_slave, 127.0.0.1, root, , test, $NODE_MYPORT_1
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (21,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (22,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (23,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (24,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (25,'node1_to_be_rollbacked_after');
--connection node_2
--let $galera_wsrep_recover_server_id=2
@@ -64,52 +66,56 @@ INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
+INSERT INTO t1 VALUES (26,'node2_committed_after');
+INSERT INTO t1 VALUES (27,'node2_committed_after');
+INSERT INTO t1 VALUES (28,'node2_committed_after');
+INSERT INTO t1 VALUES (29,'node2_committed_after');
+INSERT INTO t1 VALUES (30,'node2_committed_after');
COMMIT;
--connection node_1
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (31,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (32,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (33,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (34,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (35,'node1_to_be_committed_after');
COMMIT;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
+INSERT INTO t1 VALUES (36,'node1_committed_after');
+INSERT INTO t1 VALUES (37,'node1_committed_after');
+INSERT INTO t1 VALUES (38,'node1_committed_after');
+INSERT INTO t1 VALUES (39,'node1_committed_after');
+INSERT INTO t1 VALUES (40,'node1_committed_after');
COMMIT;
--connection node_1a_galera_st_kill_slave
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (41,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (42,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (43,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (45,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (46,'node1_to_be_rollbacked_after');
ROLLBACK;
--let $wait_condition = SELECT COUNT(*)=35 FROM t1
--source include/wait_condition.inc
-SELECT COUNT(*) = 35 FROM t1;
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_35 FROM t1;
+SELECT * FROM t1;
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
COMMIT;
-SET AUTOCOMMIT=ON;
--connection node_1
--let $wait_condition = SELECT COUNT(*)=35 FROM t1
--source include/wait_condition.inc
-SELECT COUNT(*) = 35 FROM t1;
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_35 FROM t1;
+SELECT * FROM t1;
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
DROP TABLE t1;
COMMIT;
-SET AUTOCOMMIT=ON;
diff --git a/mysql-test/suite/galera/include/galera_st_kill_slave_ddl.inc b/mysql-test/suite/galera/include/galera_st_kill_slave_ddl.inc
index bb8c68bd181..22aa4f12f35 100644
--- a/mysql-test/suite/galera/include/galera_st_kill_slave_ddl.inc
+++ b/mysql-test/suite/galera/include/galera_st_kill_slave_ddl.inc
@@ -4,22 +4,24 @@ if ($have_debug) {
--echo while a DDL was in progress on it
--connection node_1
-CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB;
+CREATE TABLE t1 (id int not null primary key,f1 CHAR(255)) ENGINE=InnoDB;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
+INSERT INTO t1 VALUES (1,'node1_committed_before');
+INSERT INTO t1 VALUES (2,'node1_committed_before');
+INSERT INTO t1 VALUES (3,'node1_committed_before');
+INSERT INTO t1 VALUES (4,'node1_committed_before');
+INSERT INTO t1 VALUES (5,'node1_committed_before');
--connection node_2
+--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.INNODB_SYS_TABLES WHERE NAME LIKE 'test/t1';
+--source include/wait_condition.inc
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
+INSERT INTO t1 VALUES (6,'node2_committed_before');
+INSERT INTO t1 VALUES (7,'node2_committed_before');
+INSERT INTO t1 VALUES (8,'node2_committed_before');
+INSERT INTO t1 VALUES (9,'node2_committed_before');
+INSERT INTO t1 VALUES (10,'node2_committed_before');
COMMIT;
# Suspend the applier as it applies the ALTER TABLE
@@ -42,28 +44,28 @@ SET wsrep_sync_wait = 0;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 (f1) VALUES ('node1_committed_during');
-INSERT INTO t1 (f1) VALUES ('node1_committed_during');
-INSERT INTO t1 (f1) VALUES ('node1_committed_during');
-INSERT INTO t1 (f1) VALUES ('node1_committed_during');
-INSERT INTO t1 (f1) VALUES ('node1_committed_during');
+INSERT INTO t1 (id,f1) VALUES (11,'node1_committed_during');
+INSERT INTO t1 (id,f1) VALUES (12,'node1_committed_during');
+INSERT INTO t1 (id,f1) VALUES (13,'node1_committed_during');
+INSERT INTO t1 (id,f1) VALUES (14,'node1_committed_during');
+INSERT INTO t1 (id,f1) VALUES (15,'node1_committed_during');
COMMIT;
START TRANSACTION;
-INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 (id,f1) VALUES (16,'node1_to_be_committed_after');
+INSERT INTO t1 (id,f1) VALUES (17,'node1_to_be_committed_after');
+INSERT INTO t1 (id,f1) VALUES (18,'node1_to_be_committed_after');
+INSERT INTO t1 (id,f1) VALUES (19,'node1_to_be_committed_after');
+INSERT INTO t1 (id,f1) VALUES (20,'node1_to_be_committed_after');
--connect node_1a_galera_st_kill_slave_ddl, 127.0.0.1, root, , test, $NODE_MYPORT_1
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 (id,f1) VALUES (21,'node1_to_be_rollbacked_after');
+INSERT INTO t1 (id,f1) VALUES (22,'node1_to_be_rollbacked_after');
+INSERT INTO t1 (id,f1) VALUES (23,'node1_to_be_rollbacked_after');
+INSERT INTO t1 (id,f1) VALUES (24,'node1_to_be_rollbacked_after');
+INSERT INTO t1 (id,f1) VALUES (25,'node1_to_be_rollbacked_after');
--connection node_2
--let $galera_wsrep_recover_server_id=2
@@ -76,58 +78,66 @@ INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
--let $wait_condition = SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'
--source include/wait_condition.inc
+--let $wait_condition = SELECT COUNT(*) = 3 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1';
+--source include/wait_condition.inc
+
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 (f1) VALUES ('node2_committed_after');
-INSERT INTO t1 (f1) VALUES ('node2_committed_after');
-INSERT INTO t1 (f1) VALUES ('node2_committed_after');
-INSERT INTO t1 (f1) VALUES ('node2_committed_after');
-INSERT INTO t1 (f1) VALUES ('node2_committed_after');
+INSERT INTO t1 (id,f1) VALUES (26,'node2_committed_after');
+INSERT INTO t1 (id,f1) VALUES (27,'node2_committed_after');
+INSERT INTO t1 (id,f1) VALUES (28,'node2_committed_after');
+INSERT INTO t1 (id,f1) VALUES (29,'node2_committed_after');
+INSERT INTO t1 (id,f1) VALUES (30,'node2_committed_after');
COMMIT;
--connection node_1
-INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 (id,f1) VALUES (31,'node1_to_be_committed_after');
+INSERT INTO t1 (id,f1) VALUES (32,'node1_to_be_committed_after');
+INSERT INTO t1 (id,f1) VALUES (33,'node1_to_be_committed_after');
+INSERT INTO t1 (id,f1) VALUES (34,'node1_to_be_committed_after');
+INSERT INTO t1 (id,f1) VALUES (35,'node1_to_be_committed_after');
COMMIT;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 (f1) VALUES ('node1_committed_after');
-INSERT INTO t1 (f1) VALUES ('node1_committed_after');
-INSERT INTO t1 (f1) VALUES ('node1_committed_after');
-INSERT INTO t1 (f1) VALUES ('node1_committed_after');
-INSERT INTO t1 (f1) VALUES ('node1_committed_after');
+INSERT INTO t1 (id,f1) VALUES (36,'node1_committed_after');
+INSERT INTO t1 (id,f1) VALUES (37,'node1_committed_after');
+INSERT INTO t1 (id,f1) VALUES (38,'node1_committed_after');
+INSERT INTO t1 (id,f1) VALUES (39,'node1_committed_after');
+INSERT INTO t1 (id,f1) VALUES (40,'node1_committed_after');
COMMIT;
--connection node_1a_galera_st_kill_slave_ddl
-INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 (id,f1) VALUES (41,'node1_to_be_rollbacked_after');
+INSERT INTO t1 (id,f1) VALUES (42,'node1_to_be_rollbacked_after');
+INSERT INTO t1 (id,f1) VALUES (43,'node1_to_be_rollbacked_after');
+INSERT INTO t1 (id,f1) VALUES (44,'node1_to_be_rollbacked_after');
+INSERT INTO t1 (id,f1) VALUES (45,'node1_to_be_rollbacked_after');
ROLLBACK;
--let $wait_condition = SELECT COUNT(*)=35 FROM t1
--source include/wait_condition.inc
-SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1';
-SELECT COUNT(*) = 35 FROM t1;
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_3 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1';
+SELECT COUNT(*) AS EXPECT_35 FROM t1;
+SELECT * FROM t1;
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
COMMIT;
-SET AUTOCOMMIT=ON;
--connection node_1
--let $wait_condition = SELECT COUNT(*)=35 FROM t1
--source include/wait_condition.inc
-SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1';
-SELECT COUNT(*) = 35 FROM t1;
+
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_3 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1';
+SELECT COUNT(*) AS EXPECT_35 FROM t1;
+SELECT * FROM t1;
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
DROP TABLE t1;
COMMIT;
-SET AUTOCOMMIT=ON;
SET GLOBAL debug_dbug = $debug_orig;
}
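The reworked state-transfer include above replaces the bare COUNT(*) = 35 checks with aliased counts (EXPECT_3, EXPECT_35) and sets wsrep_sync_wait=15 before reading. As a rough illustration only (not part of the patch): wsrep_sync_wait is a bitmask, and 15 enables causality waits for every statement class, so these SELECTs run only after the node has applied all preceding cluster writes.

-- Illustrative sketch; bit values follow the documented wsrep_sync_wait flags:
--   1 = READ (SELECT/BEGIN), 2 = UPDATE/DELETE, 4 = INSERT/REPLACE, 8 = SHOW
SET SESSION wsrep_sync_wait = 15;
SELECT COUNT(*) AS EXPECT_35 FROM t1;  -- waits for full causality before counting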
diff --git a/mysql-test/suite/galera/include/galera_st_shutdown_slave.inc b/mysql-test/suite/galera/include/galera_st_shutdown_slave.inc
index eeb6a15e0a3..7492e9f3579 100644
--- a/mysql-test/suite/galera/include/galera_st_shutdown_slave.inc
+++ b/mysql-test/suite/galera/include/galera_st_shutdown_slave.inc
@@ -1,14 +1,14 @@
--echo Performing State Transfer on a server that has been shut down cleanly and restarted
--connection node_1
-CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB;
+CREATE TABLE t1 (id int not null primary key,f1 CHAR(255)) ENGINE=InnoDB;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
+INSERT INTO t1 VALUES (1,'node1_committed_before');
+INSERT INTO t1 VALUES (2,'node1_committed_before');
+INSERT INTO t1 VALUES (3,'node1_committed_before');
+INSERT INTO t1 VALUES (4,'node1_committed_before');
+INSERT INTO t1 VALUES (5,'node1_committed_before');
COMMIT;
--connection node_2
@@ -17,11 +17,11 @@ COMMIT;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
+INSERT INTO t1 VALUES (6,'node2_committed_before');
+INSERT INTO t1 VALUES (7,'node2_committed_before');
+INSERT INTO t1 VALUES (8,'node2_committed_before');
+INSERT INTO t1 VALUES (9,'node2_committed_before');
+INSERT INTO t1 VALUES (10,'node2_committed_before');
COMMIT;
--echo Shutting down server ...
@@ -33,28 +33,28 @@ COMMIT;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
+INSERT INTO t1 VALUES (11,'node1_committed_during');
+INSERT INTO t1 VALUES (12,'node1_committed_during');
+INSERT INTO t1 VALUES (13,'node1_committed_during');
+INSERT INTO t1 VALUES (14,'node1_committed_during');
+INSERT INTO t1 VALUES (15,'node1_committed_during');
COMMIT;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (16,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (17,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (18,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (19,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (20,'node1_to_be_committed_after');
--connect node_1a_galera_st_shutdown_slave, 127.0.0.1, root, , test, $NODE_MYPORT_1
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (21,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (22,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (23,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (24,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (25,'node1_to_be_rollbacked_after');
--connection node_2
--echo Starting server ...
@@ -65,52 +65,57 @@ INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
+INSERT INTO t1 VALUES (26,'node2_committed_after');
+INSERT INTO t1 VALUES (27,'node2_committed_after');
+INSERT INTO t1 VALUES (28,'node2_committed_after');
+INSERT INTO t1 VALUES (29,'node2_committed_after');
+INSERT INTO t1 VALUES (30,'node2_committed_after');
COMMIT;
--connection node_1
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (31,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (32,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (33,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (34,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (35,'node1_to_be_committed_after');
COMMIT;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
+INSERT INTO t1 VALUES (36,'node1_committed_after');
+INSERT INTO t1 VALUES (37,'node1_committed_after');
+INSERT INTO t1 VALUES (38,'node1_committed_after');
+INSERT INTO t1 VALUES (39,'node1_committed_after');
+INSERT INTO t1 VALUES (40,'node1_committed_after');
COMMIT;
--connection node_1a_galera_st_shutdown_slave
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (41,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (42,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (43,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (44,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (45,'node1_to_be_rollbacked_after');
ROLLBACK;
--let $wait_condition = SELECT COUNT(*)=35 FROM t1
--source include/wait_condition.inc
-SELECT COUNT(*) = 35 FROM t1;
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_15 FROM t1;
+SELECT * from t1;
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
COMMIT;
-SET AUTOCOMMIT=ON;
--connection node_1
--let $wait_condition = SELECT COUNT(*)=35 FROM t1
--source include/wait_condition.inc
-SELECT COUNT(*) = 35 FROM t1;
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_15 FROM t1;
+SELECT * from t1;
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
DROP TABLE t1;
COMMIT;
-SET AUTOCOMMIT=ON;
+
diff --git a/mysql-test/suite/galera/include/galera_load_provider.inc b/mysql-test/suite/galera/include/galera_start_replication.inc
index 0f843597d9c..b9b201106d7 100644
--- a/mysql-test/suite/galera/include/galera_load_provider.inc
+++ b/mysql-test/suite/galera/include/galera_start_replication.inc
@@ -1,8 +1,6 @@
--echo Loading wsrep provider ...
--disable_query_log
---eval SET GLOBAL wsrep_provider = '$wsrep_provider_orig';
-
#
# count occurences of successful node starts in error log
#
diff --git a/mysql-test/suite/galera/include/galera_unload_provider.inc b/mysql-test/suite/galera/include/galera_stop_replication.inc
index cd841f51fbc..ed7e9bc41f0 100644
--- a/mysql-test/suite/galera/include/galera_unload_provider.inc
+++ b/mysql-test/suite/galera/include/galera_stop_replication.inc
@@ -1,7 +1,6 @@
--echo Unloading wsrep provider ...
--let $wsrep_cluster_address_orig = `SELECT @@wsrep_cluster_address`
---let $wsrep_provider_orig = `SELECT @@wsrep_provider`
--let $wsrep_provider_options_orig = `SELECT @@wsrep_provider_options`
--let $wsrep_error_log_orig = `SELECT @@log_error`
if(!$wsrep_log_error_orig)
@@ -12,4 +11,4 @@ if(!$wsrep_log_error_orig)
}
--let LOG_FILE= $wsrep_log_error_orig
-SET GLOBAL wsrep_provider = 'none';
+SET GLOBAL wsrep_cluster_address = '';
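The two renames above change how these helpers detach a node from and reattach it to the cluster: galera_stop_replication.inc now clears wsrep_cluster_address instead of setting wsrep_provider = 'none', and galera_start_replication.inc no longer reloads the provider. A minimal usage sketch, assuming a caller test; the --source paths are illustrative, and the rejoin step presumably restores the $wsrep_cluster_address_orig saved by the stop include:

# Hypothetical caller of the renamed includes
--source suite/galera/include/galera_stop_replication.inc
# node is now detached via SET GLOBAL wsrep_cluster_address = ''
--source suite/galera/include/galera_start_replication.inc
# node rejoins the cluster and state transfer proceeds as before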
diff --git a/mysql-test/suite/galera/r/MENT-1047.result b/mysql-test/suite/galera/r/MENT-1047.result
new file mode 100644
index 00000000000..76d58f6d52f
--- /dev/null
+++ b/mysql-test/suite/galera/r/MENT-1047.result
@@ -0,0 +1,4 @@
+connection node_2;
+connection node_1;
+XA START 'trx';
+ERROR 42000: This version of MariaDB doesn't yet support 'XA transactions with Galera replication'
diff --git a/mysql-test/suite/galera/r/galera_UK_conflict.result b/mysql-test/suite/galera/r/galera_UK_conflict.result
new file mode 100644
index 00000000000..44bb64c9d63
--- /dev/null
+++ b/mysql-test/suite/galera/r/galera_UK_conflict.result
@@ -0,0 +1,131 @@
+connection node_2;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 int, f3 int, unique key keyj (f2));
+INSERT INTO t1 VALUES (1, 1, 0);
+INSERT INTO t1 VALUES (3, 3, 0);
+INSERT INTO t1 VALUES (10, 10, 0);
+SET GLOBAL wsrep_slave_threads = 3;
+SET GLOBAL DEBUG_DBUG = "d,sync.wsrep_apply_cb";
+connection node_1;
+SET SESSION wsrep_sync_wait=0;
+START TRANSACTION;
+DELETE FROM t1 WHERE f2 = 3;
+INSERT INTO t1 VALUES (3, 3, 1);
+connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1;
+connection node_1a;
+SET SESSION wsrep_sync_wait=0;
+connection node_2;
+INSERT INTO t1 VALUES (5, 5, 2);
+connection node_1a;
+SET SESSION DEBUG_SYNC = "now WAIT_FOR sync.wsrep_apply_cb_reached";
+SET GLOBAL wsrep_provider_options = 'dbug=d,apply_monitor_slave_enter_sync';
+connection node_2;
+INSERT INTO t1 VALUES (4, 4, 2);
+connection node_1a;
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'dbug=d,commit_monitor_master_enter_sync';
+connection node_1;
+COMMIT;
+connection node_1a;
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'signal=commit_monitor_master_enter_sync';
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL DEBUG_DBUG = "";
+SET DEBUG_SYNC = "now SIGNAL signal.wsrep_apply_cb";
+SET GLOBAL debug_dbug = NULL;
+SET debug_sync='RESET';
+SET GLOBAL DEBUG_DBUG = "d,sync.wsrep_apply_cb";
+SET GLOBAL wsrep_provider_options = 'signal=apply_monitor_slave_enter_sync';
+SET SESSION DEBUG_SYNC = "now WAIT_FOR sync.wsrep_apply_cb_reached";
+SET GLOBAL wsrep_provider_options = 'dbug=d,commit_monitor_slave_enter_sync';
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL DEBUG_DBUG = "";
+SET DEBUG_SYNC = "now SIGNAL signal.wsrep_apply_cb";
+SET GLOBAL debug_dbug = NULL;
+SET debug_sync='RESET';
+SET GLOBAL wsrep_provider_options = 'signal=commit_monitor_slave_enter_sync';
+SET GLOBAL wsrep_provider_options = 'dbug=';
+connection node_1;
+SELECT * FROM t1;
+f1 f2 f3
+1 1 0
+3 3 1
+4 4 2
+5 5 2
+10 10 0
+SET GLOBAL wsrep_slave_threads = DEFAULT;
+connection node_2;
+SELECT * FROM t1;
+f1 f2 f3
+1 1 0
+3 3 1
+4 4 2
+5 5 2
+10 10 0
+INSERT INTO t1 VALUES (7,7,7);
+INSERT INTO t1 VALUES (8,8,8);
+DROP TABLE t1;
+test scenario 2
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 int, f3 int, unique key keyj (f2));
+INSERT INTO t1 VALUES (1, 1, 0);
+INSERT INTO t1 VALUES (3, 3, 0);
+INSERT INTO t1 VALUES (10, 10, 0);
+SET GLOBAL wsrep_slave_threads = 3;
+SET GLOBAL wsrep_provider_options = 'dbug=d,apply_monitor_slave_enter_sync';
+connection node_1;
+SET SESSION wsrep_sync_wait=0;
+START TRANSACTION;
+DELETE FROM t1 WHERE f2 = 3;
+INSERT INTO t1 VALUES (3, 3, 1);
+connection node_1a;
+SET SESSION wsrep_sync_wait=0;
+connection node_2;
+INSERT INTO t1 VALUES (5, 5, 2);
+connection node_1a;
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'dbug=d,commit_monitor_master_enter_sync';
+connection node_1;
+COMMIT;
+connection node_1a;
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL DEBUG_DBUG = "d,sync.wsrep_replay_cb";
+SET GLOBAL wsrep_provider_options = 'signal=commit_monitor_master_enter_sync';
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'signal=apply_monitor_slave_enter_sync';
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET SESSION DEBUG_SYNC = "now WAIT_FOR sync.wsrep_replay_cb_reached";
+SET GLOBAL wsrep_provider_options = 'dbug=d,commit_monitor_slave_enter_sync';
+connection node_2;
+INSERT INTO t1 VALUES (4, 4, 2);
+connection node_1a;
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'signal=commit_monitor_slave_enter_sync';
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL DEBUG_DBUG = "";
+SET DEBUG_SYNC = "now SIGNAL signal.wsrep_replay_cb";
+SET GLOBAL debug_dbug = NULL;
+SET debug_sync='RESET';
+connection node_1;
+SET GLOBAL wsrep_slave_threads = DEFAULT;
+connection node_2;
+SELECT * FROM t1;
+f1 f2 f3
+1 1 0
+3 3 1
+4 4 2
+5 5 2
+10 10 0
+INSERT INTO t1 VALUES (7,7,7);
+INSERT INTO t1 VALUES (8,8,8);
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera/r/galera_as_slave_replay.result b/mysql-test/suite/galera/r/galera_as_slave_replay.result
index 760617be5f7..3c2cea19179 100644
--- a/mysql-test/suite/galera/r/galera_as_slave_replay.result
+++ b/mysql-test/suite/galera/r/galera_as_slave_replay.result
@@ -1,10 +1,13 @@
connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2;
connection node_2a;
+connection node_2;
connection node_1;
+connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3;
+connection node_3;
RESET MASTER;
connection node_2a;
START SLAVE;
-connection node_1;
+connection node_3;
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 CHAR(1)) engine=innodb;
INSERT INTO t1 VALUES (1, 'a');
INSERT INTO t1 VALUES (3, 'a');
@@ -18,15 +21,14 @@ f1 f2
UPDATE t1 SET f2 = 'c' WHERE f1 > 1;
connection node_2a;
SET SESSION wsrep_sync_wait = 0;
-connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3;
-connection node_3;
+connection node_1;
SET SESSION wsrep_sync_wait = 0;
connection node_2a;
-SET GLOBAL wsrep_provider_options = 'dbug=d,commit_monitor_enter_sync';
+SET GLOBAL wsrep_provider_options = 'dbug=d,commit_monitor_master_enter_sync';
SET GLOBAL debug_dbug = "d,sync.wsrep_apply_cb";
-connection node_3;
-INSERT INTO test.t1 VALUES (2, 'b');
connection node_1;
+INSERT INTO test.t1 VALUES (2, 'b');
+connection node_3;
COMMIT;
connection node_2a;
SET SESSION wsrep_on = 0;
@@ -35,8 +37,8 @@ SET GLOBAL debug_dbug = "";
SET DEBUG_SYNC = "now SIGNAL signal.wsrep_apply_cb";
connection node_2a;
SET GLOBAL wsrep_provider_options = 'dbug=';
-SET GLOBAL wsrep_provider_options = 'signal=commit_monitor_enter_sync';
-connection node_1;
+SET GLOBAL wsrep_provider_options = 'signal=commit_monitor_master_enter_sync';
+connection node_3;
SELECT COUNT(*) = 1 FROM t1 WHERE f2 = 'a';
COUNT(*) = 1
1
@@ -61,7 +63,7 @@ SET DEBUG_SYNC = "RESET";
#
# test phase with real abort
#
-connection node_1;
+connection node_3;
set binlog_format=ROW;
insert into t1 values (4, 'd');
SET AUTOCOMMIT=ON;
@@ -70,9 +72,9 @@ UPDATE t1 SET f2 = 'd' WHERE f1 = 3;
connection node_2a;
SET GLOBAL wsrep_provider_options = 'dbug=d,commit_monitor_enter_sync';
SET GLOBAL debug_dbug = "d,sync.wsrep_apply_cb";
-connection node_3;
-UPDATE test.t1 SET f2 = 'e' WHERE f1 = 3;
connection node_1;
+UPDATE test.t1 SET f2 = 'e' WHERE f1 = 3;
+connection node_3;
COMMIT;
connection node_2a;
SET GLOBAL debug_dbug = "";
@@ -90,6 +92,6 @@ set session wsrep_sync_wait=0;
STOP SLAVE;
RESET SLAVE;
DROP TABLE t1;
-connection node_1;
+connection node_3;
DROP TABLE t1;
RESET MASTER;
diff --git a/mysql-test/suite/galera/r/galera_bf_abort_ps.result b/mysql-test/suite/galera/r/galera_bf_abort_ps.result
new file mode 100644
index 00000000000..42292cb20a0
--- /dev/null
+++ b/mysql-test/suite/galera/r/galera_bf_abort_ps.result
@@ -0,0 +1,16 @@
+connection node_2;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 CHAR(6)) ENGINE=InnoDB;
+connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2;
+connection node_2;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1,'node_2');
+connection node_1;
+INSERT INTO t1 VALUES (1,'node_1');
+connection node_2a;
+connection node_2;
+INSERT INTO t1 VALUES (2, 'node_2');
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+wsrep_local_aborts_increment
+1
+DROP TABLE t1;
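In the new galera_bf_abort_ps result above, the bare wsrep_local_aborts_increment / 1 lines come from a status-delta check in the corresponding .test file, which this diff does not show. A hypothetical mysqltest sketch of how such a check is commonly written (variable name and exact query are assumptions, not taken from the patch); with the query log disabled, only the column alias and value end up in the .result file:

--let $aborts_before = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_bf_aborts'`
# ... provoke the brute-force abort (conflicting write applied from the other node) ...
--disable_query_log
--eval SELECT VARIABLE_VALUE - $aborts_before AS wsrep_local_aborts_increment FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_bf_aborts'
--enable_query_log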
diff --git a/mysql-test/suite/galera/r/galera_bf_abort_ps_threadpool.result b/mysql-test/suite/galera/r/galera_bf_abort_ps_threadpool.result
new file mode 100644
index 00000000000..7482e76778e
--- /dev/null
+++ b/mysql-test/suite/galera/r/galera_bf_abort_ps_threadpool.result
@@ -0,0 +1,22 @@
+connection node_2;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 CHAR(6)) ENGINE=InnoDB;
+connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2;
+connection node_2;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1,'node_2');
+connection node_2a;
+SET GLOBAL debug_dbug = "+d,sync.wsrep_apply_cb";
+connection node_1;
+INSERT INTO t1 VALUES (1,'node_1');
+connection node_2a;
+SET DEBUG_SYNC = "now WAIT_FOR sync.wsrep_apply_cb_reached";
+connection node_2;
+SET DEBUG_SYNC = "wsrep_before_before_command SIGNAL signal.wsrep_apply_cb WAIT_FOR bf_abort";
+INSERT INTO t1 VALUES (2, 'node_2');
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+wsrep_local_aborts_increment
+1
+SET DEBUG_SYNC = 'RESET';
+SET GLOBAL debug_dbug = DEFAULT;
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera/r/galera_ctas.result b/mysql-test/suite/galera/r/galera_ctas.result
new file mode 100644
index 00000000000..f044f807410
--- /dev/null
+++ b/mysql-test/suite/galera/r/galera_ctas.result
@@ -0,0 +1,88 @@
+connection node_2;
+connection node_1;
+connection node_1;
+create table t1_Aria(a int, count int, b int, key(b)) engine=Aria;
+INSERT INTO t1_Aria values (1,1,1);
+create table t1_MyISAM(a int, count int, b int, key(b)) engine=MyISAM;
+INSERT INTO t1_MyISAM values (1,1,1);
+create table t1_InnoDB(a int, count int, b int, key(b)) engine=InnoDB;
+INSERT INTO t1_InnoDB values (1,1,1);
+SET SESSION default_storage_engine=MyISAM;
+CREATE TABLE t2 AS SELECT * FROM t1_Aria;
+CREATE TABLE t3 AS SELECT * FROM t1_MyISAM;
+CREATE TABLE t4 AS SELECT * FROM t1_InnoDB;
+SHOW CREATE TABLE t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `a` int(11) DEFAULT NULL,
+ `count` int(11) DEFAULT NULL,
+ `b` int(11) DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+SHOW CREATE TABLE t3;
+Table Create Table
+t3 CREATE TABLE `t3` (
+ `a` int(11) DEFAULT NULL,
+ `count` int(11) DEFAULT NULL,
+ `b` int(11) DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+SHOW CREATE TABLE t4;
+Table Create Table
+t4 CREATE TABLE `t4` (
+ `a` int(11) DEFAULT NULL,
+ `count` int(11) DEFAULT NULL,
+ `b` int(11) DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+DROP TABLE t2, t3,t4;
+SET SESSION default_storage_engine=Aria;
+CREATE TABLE t2 AS SELECT * FROM t1_Aria;
+CREATE TABLE t3 AS SELECT * FROM t1_MyISAM;
+CREATE TABLE t4 AS SELECT * FROM t1_InnoDB;
+SHOW CREATE TABLE t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `a` int(11) DEFAULT NULL,
+ `count` int(11) DEFAULT NULL,
+ `b` int(11) DEFAULT NULL
+) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=1
+SHOW CREATE TABLE t3;
+Table Create Table
+t3 CREATE TABLE `t3` (
+ `a` int(11) DEFAULT NULL,
+ `count` int(11) DEFAULT NULL,
+ `b` int(11) DEFAULT NULL
+) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=1
+SHOW CREATE TABLE t4;
+Table Create Table
+t4 CREATE TABLE `t4` (
+ `a` int(11) DEFAULT NULL,
+ `count` int(11) DEFAULT NULL,
+ `b` int(11) DEFAULT NULL
+) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=1
+DROP TABLE t2, t3,t4;
+SET SESSION default_storage_engine=InnoDB;
+CREATE TABLE t2 AS SELECT * FROM t1_Aria;
+CREATE TABLE t3 AS SELECT * FROM t1_MyISAM;
+CREATE TABLE t4 AS SELECT * FROM t1_InnoDB;
+SHOW CREATE TABLE t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `a` int(11) DEFAULT NULL,
+ `count` int(11) DEFAULT NULL,
+ `b` int(11) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+SHOW CREATE TABLE t3;
+Table Create Table
+t3 CREATE TABLE `t3` (
+ `a` int(11) DEFAULT NULL,
+ `count` int(11) DEFAULT NULL,
+ `b` int(11) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+SHOW CREATE TABLE t4;
+Table Create Table
+t4 CREATE TABLE `t4` (
+ `a` int(11) DEFAULT NULL,
+ `count` int(11) DEFAULT NULL,
+ `b` int(11) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+DROP TABLE t2, t3,t4;
+DROP TABLE t1_MyISAM, t1_Aria,t1_InnoDB;
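The new galera_ctas result above records that CREATE TABLE ... AS SELECT builds the target table with the session's default_storage_engine, regardless of the source table's engine (note the ENGINE= clause in each SHOW CREATE TABLE). A minimal sketch of that behaviour, using a throwaway table name not taken from the patch:

SET SESSION default_storage_engine = InnoDB;
CREATE TABLE t_copy AS SELECT * FROM t1_Aria;  -- source table is Aria ...
SHOW CREATE TABLE t_copy;                      -- ... but t_copy is created with ENGINE=InnoDB
DROP TABLE t_copy;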
diff --git a/mysql-test/suite/galera/r/galera_fk_cascade_delete.result b/mysql-test/suite/galera/r/galera_fk_cascade_delete.result
index 16db26c3c6b..808e32b8cb2 100644
--- a/mysql-test/suite/galera/r/galera_fk_cascade_delete.result
+++ b/mysql-test/suite/galera/r/galera_fk_cascade_delete.result
@@ -4,7 +4,6 @@ connection node_1;
# test phase with cascading foreign key through 3 tables
#
connection node_1;
-set wsrep_sync_wait=0;
CREATE TABLE grandparent (
id INT NOT NULL PRIMARY KEY
) ENGINE=InnoDB;
@@ -26,15 +25,26 @@ INSERT INTO grandparent VALUES (1),(2);
INSERT INTO parent VALUES (1,1), (2,2);
INSERT INTO child VALUES (1,1), (2,2);
connection node_2;
-set wsrep_sync_wait=0;
DELETE FROM grandparent WHERE id = 1;
+SELECT * FROM grandparent;
+id
+2
+SELECT * FROM parent;
+id grandparent_id
+2 2
+SELECT * FROM child;
+id parent_id
+2 2
connection node_1;
-SELECT COUNT(*), COUNT(*) = 0 FROM parent WHERE grandparent_id = 1;
-COUNT(*) COUNT(*) = 0
-0 1
-SELECT COUNT(*), COUNT(*) = 0 FROM child WHERE parent_id = 1;
-COUNT(*) COUNT(*) = 0
-0 1
+SELECT * FROM grandparent;
+id
+2
+SELECT * FROM parent;
+id grandparent_id
+2 2
+SELECT * FROM child;
+id parent_id
+2 2
DROP TABLE child;
DROP TABLE parent;
DROP TABLE grandparent;
diff --git a/mysql-test/suite/galera/r/galera_fk_cascade_delete_debug.result b/mysql-test/suite/galera/r/galera_fk_cascade_delete_debug.result
index bd76692b27c..e545da53855 100644
--- a/mysql-test/suite/galera/r/galera_fk_cascade_delete_debug.result
+++ b/mysql-test/suite/galera/r/galera_fk_cascade_delete_debug.result
@@ -20,12 +20,10 @@ INSERT INTO child VALUES (1,'row one'), (2,'row two');
connection node_2;
DELETE FROM parent;
connection node_1;
-SELECT COUNT(*), COUNT(*) = 0 FROM parent;
-COUNT(*) COUNT(*) = 0
-0 1
-SELECT COUNT(*), COUNT(*) = 0 FROM child;
-COUNT(*) COUNT(*) = 0
-0 1
+SELECT * FROM parent;
+id
+SELECT * FROM child;
+id parent_id
DROP TABLE child;
DROP TABLE parent;
#
@@ -60,11 +58,9 @@ SET DEBUG_SYNC = "now SIGNAL signal.wsrep_apply_cb";
SET GLOBAL debug_dbug = "";
SET DEBUG_SYNC = "RESET";
connection node_1;
-SELECT COUNT(*), COUNT(*) = 0 FROM parent;
-COUNT(*) COUNT(*) = 0
-0 1
-SELECT COUNT(*), COUNT(*) = 0 FROM child;
-COUNT(*) COUNT(*) = 0
-0 1
+SELECT * FROM parent;
+id
+SELECT * FROM child;
+id j parent_id
DROP TABLE child;
DROP TABLE parent;
diff --git a/mysql-test/suite/galera/r/galera_fulltext.result b/mysql-test/suite/galera/r/galera_fulltext.result
index a22296278fa..6c642757aa4 100644
--- a/mysql-test/suite/galera/r/galera_fulltext.result
+++ b/mysql-test/suite/galera/r/galera_fulltext.result
@@ -36,3 +36,31 @@ COUNT(f1) = 1000
1
DROP TABLE t1;
DROP TABLE ten;
+connection node_1;
+SET @value=REPEAT (1,5001);
+CREATE TABLE t (a VARCHAR(5000),FULLTEXT (a)) engine=innodb;
+INSERT IGNORE INTO t VALUES(@value);
+Warnings:
+Warning 1265 Data truncated for column 'a' at row 1
+SELECT COUNT(*) FROM t;
+COUNT(*)
+1
+connection node_2;
+SELECT COUNT(*) FROM t;
+COUNT(*)
+1
+connection node_1;
+DROP TABLE t;
+CREATE TABLE t (a VARCHAR(5000)) engine=innodb;
+INSERT IGNORE INTO t VALUES(@value);
+Warnings:
+Warning 1265 Data truncated for column 'a' at row 1
+SELECT COUNT(*) FROM t;
+COUNT(*)
+1
+connection node_2;
+SELECT COUNT(*) FROM t;
+COUNT(*)
+1
+connection node_1;
+DROP TABLE t;
diff --git a/mysql-test/suite/galera/r/galera_ist_mariabackup,debug.rdiff b/mysql-test/suite/galera/r/galera_ist_mariabackup,debug.rdiff
index fe54c515395..adf12c23e4a 100644
--- a/mysql-test/suite/galera/r/galera_ist_mariabackup,debug.rdiff
+++ b/mysql-test/suite/galera/r/galera_ist_mariabackup,debug.rdiff
@@ -1,27 +1,27 @@
---- galera_ist_mariabackup.result 2018-12-11 13:33:56.728535840 +0100
-+++ galera_ist_mariabackup.reject 2018-12-11 13:37:40.572535840 +0100
-@@ -290,3 +290,111 @@
+--- r/galera_ist_mariabackup.result 2021-04-10 14:21:16.141724901 +0300
++++ r/galera_ist_mariabackup,debug.reject 2021-04-10 14:49:04.455785652 +0300
+@@ -517,3 +517,187 @@
+ 1
DROP TABLE t1;
COMMIT;
- SET AUTOCOMMIT=ON;
+Performing State Transfer on a server that has been killed and restarted
+while a DDL was in progress on it
+connection node_1;
-+CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB;
++CREATE TABLE t1 (id int not null primary key,f1 CHAR(255)) ENGINE=InnoDB;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
-+INSERT INTO t1 VALUES ('node1_committed_before');
-+INSERT INTO t1 VALUES ('node1_committed_before');
-+INSERT INTO t1 VALUES ('node1_committed_before');
-+INSERT INTO t1 VALUES ('node1_committed_before');
-+INSERT INTO t1 VALUES ('node1_committed_before');
++INSERT INTO t1 VALUES (1,'node1_committed_before');
++INSERT INTO t1 VALUES (2,'node1_committed_before');
++INSERT INTO t1 VALUES (3,'node1_committed_before');
++INSERT INTO t1 VALUES (4,'node1_committed_before');
++INSERT INTO t1 VALUES (5,'node1_committed_before');
+connection node_2;
+START TRANSACTION;
-+INSERT INTO t1 VALUES ('node2_committed_before');
-+INSERT INTO t1 VALUES ('node2_committed_before');
-+INSERT INTO t1 VALUES ('node2_committed_before');
-+INSERT INTO t1 VALUES ('node2_committed_before');
-+INSERT INTO t1 VALUES ('node2_committed_before');
++INSERT INTO t1 VALUES (6,'node2_committed_before');
++INSERT INTO t1 VALUES (7,'node2_committed_before');
++INSERT INTO t1 VALUES (8,'node2_committed_before');
++INSERT INTO t1 VALUES (9,'node2_committed_before');
++INSERT INTO t1 VALUES (10,'node2_committed_before');
+COMMIT;
+SET GLOBAL debug_dbug = 'd,sync.alter_opened_table';
+connection node_1;
@@ -32,26 +32,26 @@
+connection node_1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
-+INSERT INTO t1 (f1) VALUES ('node1_committed_during');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_during');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_during');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_during');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_during');
++INSERT INTO t1 (id,f1) VALUES (11,'node1_committed_during');
++INSERT INTO t1 (id,f1) VALUES (12,'node1_committed_during');
++INSERT INTO t1 (id,f1) VALUES (13,'node1_committed_during');
++INSERT INTO t1 (id,f1) VALUES (14,'node1_committed_during');
++INSERT INTO t1 (id,f1) VALUES (15,'node1_committed_during');
+COMMIT;
+START TRANSACTION;
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (16,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (17,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (18,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (19,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (20,'node1_to_be_committed_after');
+connect node_1a_galera_st_kill_slave_ddl, 127.0.0.1, root, , test, $NODE_MYPORT_1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (21,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (22,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (23,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (24,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (25,'node1_to_be_rollbacked_after');
+connection node_2;
+Performing --wsrep-recover ...
+connection node_2;
@@ -59,56 +59,132 @@
+Using --wsrep-start-position when starting mysqld ...
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
-+INSERT INTO t1 (f1) VALUES ('node2_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node2_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node2_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node2_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node2_committed_after');
++INSERT INTO t1 (id,f1) VALUES (26,'node2_committed_after');
++INSERT INTO t1 (id,f1) VALUES (27,'node2_committed_after');
++INSERT INTO t1 (id,f1) VALUES (28,'node2_committed_after');
++INSERT INTO t1 (id,f1) VALUES (29,'node2_committed_after');
++INSERT INTO t1 (id,f1) VALUES (30,'node2_committed_after');
+COMMIT;
+connection node_1;
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (31,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (32,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (33,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (34,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (35,'node1_to_be_committed_after');
+COMMIT;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
-+INSERT INTO t1 (f1) VALUES ('node1_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_after');
++INSERT INTO t1 (id,f1) VALUES (36,'node1_committed_after');
++INSERT INTO t1 (id,f1) VALUES (37,'node1_committed_after');
++INSERT INTO t1 (id,f1) VALUES (38,'node1_committed_after');
++INSERT INTO t1 (id,f1) VALUES (39,'node1_committed_after');
++INSERT INTO t1 (id,f1) VALUES (40,'node1_committed_after');
+COMMIT;
+connection node_1a_galera_st_kill_slave_ddl;
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (41,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (42,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (43,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (44,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (45,'node1_to_be_rollbacked_after');
+ROLLBACK;
-+SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1';
-+COUNT(*) = 2
-+1
-+SELECT COUNT(*) = 35 FROM t1;
-+COUNT(*) = 35
-+1
++SET AUTOCOMMIT=ON;
++SET SESSION wsrep_sync_wait=15;
++SELECT COUNT(*) AS EXPECT_3 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1';
++EXPECT_3
++3
++SELECT COUNT(*) AS EXPECT_35 FROM t1;
++EXPECT_35
++35
++SELECT * FROM t1;
++id f1 f2
++1 node1_committed_before NULL
++2 node1_committed_before NULL
++3 node1_committed_before NULL
++4 node1_committed_before NULL
++5 node1_committed_before NULL
++6 node2_committed_before NULL
++7 node2_committed_before NULL
++8 node2_committed_before NULL
++9 node2_committed_before NULL
++10 node2_committed_before NULL
++11 node1_committed_during NULL
++12 node1_committed_during NULL
++13 node1_committed_during NULL
++14 node1_committed_during NULL
++15 node1_committed_during NULL
++16 node1_to_be_committed_after NULL
++17 node1_to_be_committed_after NULL
++18 node1_to_be_committed_after NULL
++19 node1_to_be_committed_after NULL
++20 node1_to_be_committed_after NULL
++26 node2_committed_after NULL
++27 node2_committed_after NULL
++28 node2_committed_after NULL
++29 node2_committed_after NULL
++30 node2_committed_after NULL
++31 node1_to_be_committed_after NULL
++32 node1_to_be_committed_after NULL
++33 node1_to_be_committed_after NULL
++34 node1_to_be_committed_after NULL
++35 node1_to_be_committed_after NULL
++36 node1_committed_after NULL
++37 node1_committed_after NULL
++38 node1_committed_after NULL
++39 node1_committed_after NULL
++40 node1_committed_after NULL
+SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
+COUNT(*) = 0
+1
+COMMIT;
-+SET AUTOCOMMIT=ON;
+connection node_1;
-+SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1';
-+COUNT(*) = 2
-+1
-+SELECT COUNT(*) = 35 FROM t1;
-+COUNT(*) = 35
-+1
++SET AUTOCOMMIT=ON;
++SET SESSION wsrep_sync_wait=15;
++SELECT COUNT(*) AS EXPECT_3 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1';
++EXPECT_3
++3
++SELECT COUNT(*) AS EXPECT_35 FROM t1;
++EXPECT_35
++35
++SELECT * FROM t1;
++id f1 f2
++1 node1_committed_before NULL
++2 node1_committed_before NULL
++3 node1_committed_before NULL
++4 node1_committed_before NULL
++5 node1_committed_before NULL
++6 node2_committed_before NULL
++7 node2_committed_before NULL
++8 node2_committed_before NULL
++9 node2_committed_before NULL
++10 node2_committed_before NULL
++11 node1_committed_during NULL
++12 node1_committed_during NULL
++13 node1_committed_during NULL
++14 node1_committed_during NULL
++15 node1_committed_during NULL
++16 node1_to_be_committed_after NULL
++17 node1_to_be_committed_after NULL
++18 node1_to_be_committed_after NULL
++19 node1_to_be_committed_after NULL
++20 node1_to_be_committed_after NULL
++26 node2_committed_after NULL
++27 node2_committed_after NULL
++28 node2_committed_after NULL
++29 node2_committed_after NULL
++30 node2_committed_after NULL
++31 node1_to_be_committed_after NULL
++32 node1_to_be_committed_after NULL
++33 node1_to_be_committed_after NULL
++34 node1_to_be_committed_after NULL
++35 node1_to_be_committed_after NULL
++36 node1_committed_after NULL
++37 node1_committed_after NULL
++38 node1_committed_after NULL
++39 node1_committed_after NULL
++40 node1_committed_after NULL
+SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
+COUNT(*) = 0
+1
+DROP TABLE t1;
+COMMIT;
-+SET AUTOCOMMIT=ON;
+SET GLOBAL debug_dbug = $debug_orig;
diff --git a/mysql-test/suite/galera/r/galera_ist_mariabackup.result b/mysql-test/suite/galera/r/galera_ist_mariabackup.result
index 13f7d898a59..5a71b490a80 100644
--- a/mysql-test/suite/galera/r/galera_ist_mariabackup.result
+++ b/mysql-test/suite/galera/r/galera_ist_mariabackup.result
@@ -4,49 +4,49 @@ connection node_1;
connection node_2;
Performing State Transfer on a server that has been temporarily disconnected
connection node_1;
-CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB;
+CREATE TABLE t1 (id int not null primary key,f1 CHAR(255)) ENGINE=InnoDB;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
+INSERT INTO t1 VALUES (1,'node1_committed_before');
+INSERT INTO t1 VALUES (2,'node1_committed_before');
+INSERT INTO t1 VALUES (3,'node1_committed_before');
+INSERT INTO t1 VALUES (4,'node1_committed_before');
+INSERT INTO t1 VALUES (5,'node1_committed_before');
COMMIT;
connection node_2;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
+INSERT INTO t1 VALUES (6,'node2_committed_before');
+INSERT INTO t1 VALUES (7,'node2_committed_before');
+INSERT INTO t1 VALUES (8,'node2_committed_before');
+INSERT INTO t1 VALUES (9,'node2_committed_before');
+INSERT INTO t1 VALUES (10,'node2_committed_before');
COMMIT;
Unloading wsrep provider ...
-SET GLOBAL wsrep_provider = 'none';
+SET GLOBAL wsrep_cluster_address = '';
connection node_1;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
+INSERT INTO t1 VALUES (11,'node1_committed_during');
+INSERT INTO t1 VALUES (12,'node1_committed_during');
+INSERT INTO t1 VALUES (13,'node1_committed_during');
+INSERT INTO t1 VALUES (14,'node1_committed_during');
+INSERT INTO t1 VALUES (15,'node1_committed_during');
COMMIT;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (16,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (17,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (18,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (19,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (20,'node1_to_be_committed_after');
connect node_1a_galera_st_disconnect_slave, 127.0.0.1, root, , test, $NODE_MYPORT_1;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (21,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (22,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (23,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (24,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (25,'node1_to_be_rollbacked_after');
connection node_2;
Loading wsrep provider ...
disconnect node_2;
@@ -54,239 +54,466 @@ connect node_2, 127.0.0.1, root, , test, $NODE_MYPORT_2;
connection node_2;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
+INSERT INTO t1 VALUES (26,'node2_committed_after');
+INSERT INTO t1 VALUES (27,'node2_committed_after');
+INSERT INTO t1 VALUES (28,'node2_committed_after');
+INSERT INTO t1 VALUES (29,'node2_committed_after');
+INSERT INTO t1 VALUES (30,'node2_committed_after');
COMMIT;
connection node_1;
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (31,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (32,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (33,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (34,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (35,'node1_to_be_committed_after');
COMMIT;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
+INSERT INTO t1 VALUES (36,'node1_committed_after');
+INSERT INTO t1 VALUES (37,'node1_committed_after');
+INSERT INTO t1 VALUES (38,'node1_committed_after');
+INSERT INTO t1 VALUES (39,'node1_committed_after');
+INSERT INTO t1 VALUES (40,'node1_committed_after');
COMMIT;
connection node_1a_galera_st_disconnect_slave;
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (41,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (42,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (43,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (44,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (45,'node1_to_be_rollbacked_after');
ROLLBACK;
-SELECT COUNT(*) = 35 FROM t1;
-COUNT(*) = 35
-1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_35 FROM t1;
+EXPECT_35
+35
+SELECT * FROM t1;
+id f1
+1 node1_committed_before
+2 node1_committed_before
+3 node1_committed_before
+4 node1_committed_before
+5 node1_committed_before
+6 node2_committed_before
+7 node2_committed_before
+8 node2_committed_before
+9 node2_committed_before
+10 node2_committed_before
+11 node1_committed_during
+12 node1_committed_during
+13 node1_committed_during
+14 node1_committed_during
+15 node1_committed_during
+16 node1_to_be_committed_after
+17 node1_to_be_committed_after
+18 node1_to_be_committed_after
+19 node1_to_be_committed_after
+20 node1_to_be_committed_after
+26 node2_committed_after
+27 node2_committed_after
+28 node2_committed_after
+29 node2_committed_after
+30 node2_committed_after
+31 node1_to_be_committed_after
+32 node1_to_be_committed_after
+33 node1_to_be_committed_after
+34 node1_to_be_committed_after
+35 node1_to_be_committed_after
+36 node1_committed_after
+37 node1_committed_after
+38 node1_committed_after
+39 node1_committed_after
+40 node1_committed_after
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
COUNT(*) = 0
1
-COMMIT;
-SET AUTOCOMMIT=ON;
connection node_1;
-SELECT COUNT(*) = 35 FROM t1;
-COUNT(*) = 35
-1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_35 FROM t1;
+EXPECT_35
+35
+SELECT * FROM t1;
+id f1
+1 node1_committed_before
+2 node1_committed_before
+3 node1_committed_before
+4 node1_committed_before
+5 node1_committed_before
+6 node2_committed_before
+7 node2_committed_before
+8 node2_committed_before
+9 node2_committed_before
+10 node2_committed_before
+11 node1_committed_during
+12 node1_committed_during
+13 node1_committed_during
+14 node1_committed_during
+15 node1_committed_during
+16 node1_to_be_committed_after
+17 node1_to_be_committed_after
+18 node1_to_be_committed_after
+19 node1_to_be_committed_after
+20 node1_to_be_committed_after
+26 node2_committed_after
+27 node2_committed_after
+28 node2_committed_after
+29 node2_committed_after
+30 node2_committed_after
+31 node1_to_be_committed_after
+32 node1_to_be_committed_after
+33 node1_to_be_committed_after
+34 node1_to_be_committed_after
+35 node1_to_be_committed_after
+36 node1_committed_after
+37 node1_committed_after
+38 node1_committed_after
+39 node1_committed_after
+40 node1_committed_after
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
COUNT(*) = 0
1
DROP TABLE t1;
COMMIT;
-SET AUTOCOMMIT=ON;
Performing State Transfer on a server that has been shut down cleanly and restarted
connection node_1;
-CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB;
+CREATE TABLE t1 (id int not null primary key,f1 CHAR(255)) ENGINE=InnoDB;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
+INSERT INTO t1 VALUES (1,'node1_committed_before');
+INSERT INTO t1 VALUES (2,'node1_committed_before');
+INSERT INTO t1 VALUES (3,'node1_committed_before');
+INSERT INTO t1 VALUES (4,'node1_committed_before');
+INSERT INTO t1 VALUES (5,'node1_committed_before');
COMMIT;
connection node_2;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
+INSERT INTO t1 VALUES (6,'node2_committed_before');
+INSERT INTO t1 VALUES (7,'node2_committed_before');
+INSERT INTO t1 VALUES (8,'node2_committed_before');
+INSERT INTO t1 VALUES (9,'node2_committed_before');
+INSERT INTO t1 VALUES (10,'node2_committed_before');
COMMIT;
Shutting down server ...
connection node_1;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
+INSERT INTO t1 VALUES (11,'node1_committed_during');
+INSERT INTO t1 VALUES (12,'node1_committed_during');
+INSERT INTO t1 VALUES (13,'node1_committed_during');
+INSERT INTO t1 VALUES (14,'node1_committed_during');
+INSERT INTO t1 VALUES (15,'node1_committed_during');
COMMIT;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (16,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (17,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (18,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (19,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (20,'node1_to_be_committed_after');
connect node_1a_galera_st_shutdown_slave, 127.0.0.1, root, , test, $NODE_MYPORT_1;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (21,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (22,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (23,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (24,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (25,'node1_to_be_rollbacked_after');
connection node_2;
Starting server ...
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
+INSERT INTO t1 VALUES (26,'node2_committed_after');
+INSERT INTO t1 VALUES (27,'node2_committed_after');
+INSERT INTO t1 VALUES (28,'node2_committed_after');
+INSERT INTO t1 VALUES (29,'node2_committed_after');
+INSERT INTO t1 VALUES (30,'node2_committed_after');
COMMIT;
connection node_1;
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (31,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (32,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (33,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (34,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (35,'node1_to_be_committed_after');
COMMIT;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
+INSERT INTO t1 VALUES (36,'node1_committed_after');
+INSERT INTO t1 VALUES (37,'node1_committed_after');
+INSERT INTO t1 VALUES (38,'node1_committed_after');
+INSERT INTO t1 VALUES (39,'node1_committed_after');
+INSERT INTO t1 VALUES (40,'node1_committed_after');
COMMIT;
connection node_1a_galera_st_shutdown_slave;
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (41,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (42,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (43,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (44,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (45,'node1_to_be_rollbacked_after');
ROLLBACK;
-SELECT COUNT(*) = 35 FROM t1;
-COUNT(*) = 35
-1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_15 FROM t1;
+EXPECT_15
+35
+SELECT * from t1;
+id f1
+1 node1_committed_before
+2 node1_committed_before
+3 node1_committed_before
+4 node1_committed_before
+5 node1_committed_before
+6 node2_committed_before
+7 node2_committed_before
+8 node2_committed_before
+9 node2_committed_before
+10 node2_committed_before
+11 node1_committed_during
+12 node1_committed_during
+13 node1_committed_during
+14 node1_committed_during
+15 node1_committed_during
+16 node1_to_be_committed_after
+17 node1_to_be_committed_after
+18 node1_to_be_committed_after
+19 node1_to_be_committed_after
+20 node1_to_be_committed_after
+26 node2_committed_after
+27 node2_committed_after
+28 node2_committed_after
+29 node2_committed_after
+30 node2_committed_after
+31 node1_to_be_committed_after
+32 node1_to_be_committed_after
+33 node1_to_be_committed_after
+34 node1_to_be_committed_after
+35 node1_to_be_committed_after
+36 node1_committed_after
+37 node1_committed_after
+38 node1_committed_after
+39 node1_committed_after
+40 node1_committed_after
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
COUNT(*) = 0
1
COMMIT;
-SET AUTOCOMMIT=ON;
connection node_1;
-SELECT COUNT(*) = 35 FROM t1;
-COUNT(*) = 35
-1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_15 FROM t1;
+EXPECT_15
+35
+SELECT * from t1;
+id f1
+1 node1_committed_before
+2 node1_committed_before
+3 node1_committed_before
+4 node1_committed_before
+5 node1_committed_before
+6 node2_committed_before
+7 node2_committed_before
+8 node2_committed_before
+9 node2_committed_before
+10 node2_committed_before
+11 node1_committed_during
+12 node1_committed_during
+13 node1_committed_during
+14 node1_committed_during
+15 node1_committed_during
+16 node1_to_be_committed_after
+17 node1_to_be_committed_after
+18 node1_to_be_committed_after
+19 node1_to_be_committed_after
+20 node1_to_be_committed_after
+26 node2_committed_after
+27 node2_committed_after
+28 node2_committed_after
+29 node2_committed_after
+30 node2_committed_after
+31 node1_to_be_committed_after
+32 node1_to_be_committed_after
+33 node1_to_be_committed_after
+34 node1_to_be_committed_after
+35 node1_to_be_committed_after
+36 node1_committed_after
+37 node1_committed_after
+38 node1_committed_after
+39 node1_committed_after
+40 node1_committed_after
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
COUNT(*) = 0
1
DROP TABLE t1;
COMMIT;
-SET AUTOCOMMIT=ON;
Performing State Transfer on a server that has been killed and restarted
connection node_1;
-CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB;
+CREATE TABLE t1 (id int not null primary key,f1 CHAR(255)) ENGINE=InnoDB;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
+INSERT INTO t1 VALUES (1,'node1_committed_before');
+INSERT INTO t1 VALUES (2,'node1_committed_before');
+INSERT INTO t1 VALUES (3,'node1_committed_before');
+INSERT INTO t1 VALUES (4,'node1_committed_before');
+INSERT INTO t1 VALUES (5,'node1_committed_before');
COMMIT;
connection node_2;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
+INSERT INTO t1 VALUES (6,'node2_committed_before');
+INSERT INTO t1 VALUES (7,'node2_committed_before');
+INSERT INTO t1 VALUES (8,'node2_committed_before');
+INSERT INTO t1 VALUES (9,'node2_committed_before');
+INSERT INTO t1 VALUES (10,'node2_committed_before');
COMMIT;
Killing server ...
connection node_1;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
+INSERT INTO t1 VALUES (11,'node1_committed_during');
+INSERT INTO t1 VALUES (12,'node1_committed_during');
+INSERT INTO t1 VALUES (13,'node1_committed_during');
+INSERT INTO t1 VALUES (14,'node1_committed_during');
+INSERT INTO t1 VALUES (15,'node1_committed_during');
COMMIT;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (16,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (17,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (18,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (19,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (20,'node1_to_be_committed_after');
connect node_1a_galera_st_kill_slave, 127.0.0.1, root, , test, $NODE_MYPORT_1;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (21,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (22,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (23,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (24,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (25,'node1_to_be_rollbacked_after');
connection node_2;
Performing --wsrep-recover ...
Starting server ...
Using --wsrep-start-position when starting mysqld ...
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
+INSERT INTO t1 VALUES (26,'node2_committed_after');
+INSERT INTO t1 VALUES (27,'node2_committed_after');
+INSERT INTO t1 VALUES (28,'node2_committed_after');
+INSERT INTO t1 VALUES (29,'node2_committed_after');
+INSERT INTO t1 VALUES (30,'node2_committed_after');
COMMIT;
connection node_1;
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (31,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (32,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (33,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (34,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (35,'node1_to_be_committed_after');
COMMIT;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
+INSERT INTO t1 VALUES (36,'node1_committed_after');
+INSERT INTO t1 VALUES (37,'node1_committed_after');
+INSERT INTO t1 VALUES (38,'node1_committed_after');
+INSERT INTO t1 VALUES (39,'node1_committed_after');
+INSERT INTO t1 VALUES (40,'node1_committed_after');
COMMIT;
connection node_1a_galera_st_kill_slave;
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (41,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (42,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (43,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (45,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (46,'node1_to_be_rollbacked_after');
ROLLBACK;
-SELECT COUNT(*) = 35 FROM t1;
-COUNT(*) = 35
-1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_35 FROM t1;
+EXPECT_35
+35
+SELECT * FROM t1;
+id f1
+1 node1_committed_before
+2 node1_committed_before
+3 node1_committed_before
+4 node1_committed_before
+5 node1_committed_before
+6 node2_committed_before
+7 node2_committed_before
+8 node2_committed_before
+9 node2_committed_before
+10 node2_committed_before
+11 node1_committed_during
+12 node1_committed_during
+13 node1_committed_during
+14 node1_committed_during
+15 node1_committed_during
+16 node1_to_be_committed_after
+17 node1_to_be_committed_after
+18 node1_to_be_committed_after
+19 node1_to_be_committed_after
+20 node1_to_be_committed_after
+26 node2_committed_after
+27 node2_committed_after
+28 node2_committed_after
+29 node2_committed_after
+30 node2_committed_after
+31 node1_to_be_committed_after
+32 node1_to_be_committed_after
+33 node1_to_be_committed_after
+34 node1_to_be_committed_after
+35 node1_to_be_committed_after
+36 node1_committed_after
+37 node1_committed_after
+38 node1_committed_after
+39 node1_committed_after
+40 node1_committed_after
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
COUNT(*) = 0
1
COMMIT;
-SET AUTOCOMMIT=ON;
connection node_1;
-SELECT COUNT(*) = 35 FROM t1;
-COUNT(*) = 35
-1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_35 FROM t1;
+EXPECT_35
+35
+SELECT * FROM t1;
+id f1
+1 node1_committed_before
+2 node1_committed_before
+3 node1_committed_before
+4 node1_committed_before
+5 node1_committed_before
+6 node2_committed_before
+7 node2_committed_before
+8 node2_committed_before
+9 node2_committed_before
+10 node2_committed_before
+11 node1_committed_during
+12 node1_committed_during
+13 node1_committed_during
+14 node1_committed_during
+15 node1_committed_during
+16 node1_to_be_committed_after
+17 node1_to_be_committed_after
+18 node1_to_be_committed_after
+19 node1_to_be_committed_after
+20 node1_to_be_committed_after
+26 node2_committed_after
+27 node2_committed_after
+28 node2_committed_after
+29 node2_committed_after
+30 node2_committed_after
+31 node1_to_be_committed_after
+32 node1_to_be_committed_after
+33 node1_to_be_committed_after
+34 node1_to_be_committed_after
+35 node1_to_be_committed_after
+36 node1_committed_after
+37 node1_committed_after
+38 node1_committed_after
+39 node1_committed_after
+40 node1_committed_after
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
COUNT(*) = 0
1
DROP TABLE t1;
COMMIT;
-SET AUTOCOMMIT=ON;
diff --git a/mysql-test/suite/galera/r/galera_ist_mariabackup_innodb_flush_logs,debug.rdiff b/mysql-test/suite/galera/r/galera_ist_mariabackup_innodb_flush_logs,debug.rdiff
index 792f98b4427..c9457d70812 100644
--- a/mysql-test/suite/galera/r/galera_ist_mariabackup_innodb_flush_logs,debug.rdiff
+++ b/mysql-test/suite/galera/r/galera_ist_mariabackup_innodb_flush_logs,debug.rdiff
@@ -1,27 +1,27 @@
---- r/galera_ist_mariabackup_innodb_flush_logs.result 2018-11-21 21:34:20.157054441 +0200
-+++ r/galera_ist_mariabackup_innodb_flush_logs.reject 2018-11-22 09:16:16.824604445 +0200
-@@ -94,3 +94,111 @@
+--- r/galera_ist_mariabackup_innodb_flush_logs.result 2021-04-10 14:21:52.661886653 +0300
++++ r/galera_ist_mariabackup_innodb_flush_logs,debug.reject 2021-04-10 14:49:56.740062774 +0300
+@@ -172,3 +172,187 @@
+ 1
DROP TABLE t1;
COMMIT;
- SET AUTOCOMMIT=ON;
+Performing State Transfer on a server that has been killed and restarted
+while a DDL was in progress on it
+connection node_1;
-+CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB;
++CREATE TABLE t1 (id int not null primary key,f1 CHAR(255)) ENGINE=InnoDB;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
-+INSERT INTO t1 VALUES ('node1_committed_before');
-+INSERT INTO t1 VALUES ('node1_committed_before');
-+INSERT INTO t1 VALUES ('node1_committed_before');
-+INSERT INTO t1 VALUES ('node1_committed_before');
-+INSERT INTO t1 VALUES ('node1_committed_before');
++INSERT INTO t1 VALUES (1,'node1_committed_before');
++INSERT INTO t1 VALUES (2,'node1_committed_before');
++INSERT INTO t1 VALUES (3,'node1_committed_before');
++INSERT INTO t1 VALUES (4,'node1_committed_before');
++INSERT INTO t1 VALUES (5,'node1_committed_before');
+connection node_2;
+START TRANSACTION;
-+INSERT INTO t1 VALUES ('node2_committed_before');
-+INSERT INTO t1 VALUES ('node2_committed_before');
-+INSERT INTO t1 VALUES ('node2_committed_before');
-+INSERT INTO t1 VALUES ('node2_committed_before');
-+INSERT INTO t1 VALUES ('node2_committed_before');
++INSERT INTO t1 VALUES (6,'node2_committed_before');
++INSERT INTO t1 VALUES (7,'node2_committed_before');
++INSERT INTO t1 VALUES (8,'node2_committed_before');
++INSERT INTO t1 VALUES (9,'node2_committed_before');
++INSERT INTO t1 VALUES (10,'node2_committed_before');
+COMMIT;
+SET GLOBAL debug_dbug = 'd,sync.alter_opened_table';
+connection node_1;
@@ -32,26 +32,26 @@
+connection node_1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
-+INSERT INTO t1 (f1) VALUES ('node1_committed_during');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_during');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_during');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_during');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_during');
++INSERT INTO t1 (id,f1) VALUES (11,'node1_committed_during');
++INSERT INTO t1 (id,f1) VALUES (12,'node1_committed_during');
++INSERT INTO t1 (id,f1) VALUES (13,'node1_committed_during');
++INSERT INTO t1 (id,f1) VALUES (14,'node1_committed_during');
++INSERT INTO t1 (id,f1) VALUES (15,'node1_committed_during');
+COMMIT;
+START TRANSACTION;
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (16,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (17,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (18,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (19,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (20,'node1_to_be_committed_after');
+connect node_1a_galera_st_kill_slave_ddl, 127.0.0.1, root, , test, $NODE_MYPORT_1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (21,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (22,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (23,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (24,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (25,'node1_to_be_rollbacked_after');
+connection node_2;
+Performing --wsrep-recover ...
+connection node_2;
@@ -59,56 +59,132 @@
+Using --wsrep-start-position when starting mysqld ...
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
-+INSERT INTO t1 (f1) VALUES ('node2_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node2_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node2_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node2_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node2_committed_after');
++INSERT INTO t1 (id,f1) VALUES (26,'node2_committed_after');
++INSERT INTO t1 (id,f1) VALUES (27,'node2_committed_after');
++INSERT INTO t1 (id,f1) VALUES (28,'node2_committed_after');
++INSERT INTO t1 (id,f1) VALUES (29,'node2_committed_after');
++INSERT INTO t1 (id,f1) VALUES (30,'node2_committed_after');
+COMMIT;
+connection node_1;
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (31,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (32,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (33,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (34,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (35,'node1_to_be_committed_after');
+COMMIT;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
-+INSERT INTO t1 (f1) VALUES ('node1_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_after');
++INSERT INTO t1 (id,f1) VALUES (36,'node1_committed_after');
++INSERT INTO t1 (id,f1) VALUES (37,'node1_committed_after');
++INSERT INTO t1 (id,f1) VALUES (38,'node1_committed_after');
++INSERT INTO t1 (id,f1) VALUES (39,'node1_committed_after');
++INSERT INTO t1 (id,f1) VALUES (40,'node1_committed_after');
+COMMIT;
+connection node_1a_galera_st_kill_slave_ddl;
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (41,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (42,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (43,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (44,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (45,'node1_to_be_rollbacked_after');
+ROLLBACK;
-+SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1';
-+COUNT(*) = 2
-+1
-+SELECT COUNT(*) = 35 FROM t1;
-+COUNT(*) = 35
-+1
++SET AUTOCOMMIT=ON;
++SET SESSION wsrep_sync_wait=15;
++SELECT COUNT(*) AS EXPECT_3 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1';
++EXPECT_3
++3
++SELECT COUNT(*) AS EXPECT_35 FROM t1;
++EXPECT_35
++35
++SELECT * FROM t1;
++id f1 f2
++1 node1_committed_before NULL
++2 node1_committed_before NULL
++3 node1_committed_before NULL
++4 node1_committed_before NULL
++5 node1_committed_before NULL
++6 node2_committed_before NULL
++7 node2_committed_before NULL
++8 node2_committed_before NULL
++9 node2_committed_before NULL
++10 node2_committed_before NULL
++11 node1_committed_during NULL
++12 node1_committed_during NULL
++13 node1_committed_during NULL
++14 node1_committed_during NULL
++15 node1_committed_during NULL
++16 node1_to_be_committed_after NULL
++17 node1_to_be_committed_after NULL
++18 node1_to_be_committed_after NULL
++19 node1_to_be_committed_after NULL
++20 node1_to_be_committed_after NULL
++26 node2_committed_after NULL
++27 node2_committed_after NULL
++28 node2_committed_after NULL
++29 node2_committed_after NULL
++30 node2_committed_after NULL
++31 node1_to_be_committed_after NULL
++32 node1_to_be_committed_after NULL
++33 node1_to_be_committed_after NULL
++34 node1_to_be_committed_after NULL
++35 node1_to_be_committed_after NULL
++36 node1_committed_after NULL
++37 node1_committed_after NULL
++38 node1_committed_after NULL
++39 node1_committed_after NULL
++40 node1_committed_after NULL
+SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
+COUNT(*) = 0
+1
+COMMIT;
-+SET AUTOCOMMIT=ON;
+connection node_1;
-+SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1';
-+COUNT(*) = 2
-+1
-+SELECT COUNT(*) = 35 FROM t1;
-+COUNT(*) = 35
-+1
++SET AUTOCOMMIT=ON;
++SET SESSION wsrep_sync_wait=15;
++SELECT COUNT(*) AS EXPECT_3 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1';
++EXPECT_3
++3
++SELECT COUNT(*) AS EXPECT_35 FROM t1;
++EXPECT_35
++35
++SELECT * FROM t1;
++id f1 f2
++1 node1_committed_before NULL
++2 node1_committed_before NULL
++3 node1_committed_before NULL
++4 node1_committed_before NULL
++5 node1_committed_before NULL
++6 node2_committed_before NULL
++7 node2_committed_before NULL
++8 node2_committed_before NULL
++9 node2_committed_before NULL
++10 node2_committed_before NULL
++11 node1_committed_during NULL
++12 node1_committed_during NULL
++13 node1_committed_during NULL
++14 node1_committed_during NULL
++15 node1_committed_during NULL
++16 node1_to_be_committed_after NULL
++17 node1_to_be_committed_after NULL
++18 node1_to_be_committed_after NULL
++19 node1_to_be_committed_after NULL
++20 node1_to_be_committed_after NULL
++26 node2_committed_after NULL
++27 node2_committed_after NULL
++28 node2_committed_after NULL
++29 node2_committed_after NULL
++30 node2_committed_after NULL
++31 node1_to_be_committed_after NULL
++32 node1_to_be_committed_after NULL
++33 node1_to_be_committed_after NULL
++34 node1_to_be_committed_after NULL
++35 node1_to_be_committed_after NULL
++36 node1_committed_after NULL
++37 node1_committed_after NULL
++38 node1_committed_after NULL
++39 node1_committed_after NULL
++40 node1_committed_after NULL
+SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
+COUNT(*) = 0
+1
+DROP TABLE t1;
+COMMIT;
-+SET AUTOCOMMIT=ON;
+SET GLOBAL debug_dbug = $debug_orig;
diff --git a/mysql-test/suite/galera/r/galera_ist_mariabackup_innodb_flush_logs.result b/mysql-test/suite/galera/r/galera_ist_mariabackup_innodb_flush_logs.result
index 99b9c8d6c1b..2060698bee0 100644
--- a/mysql-test/suite/galera/r/galera_ist_mariabackup_innodb_flush_logs.result
+++ b/mysql-test/suite/galera/r/galera_ist_mariabackup_innodb_flush_logs.result
@@ -2,97 +2,173 @@ connection node_2;
connection node_1;
Performing State Transfer on a server that has been killed and restarted
connection node_1;
-CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB;
+CREATE TABLE t1 (id int not null primary key,f1 CHAR(255)) ENGINE=InnoDB;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
+INSERT INTO t1 VALUES (1,'node1_committed_before');
+INSERT INTO t1 VALUES (2,'node1_committed_before');
+INSERT INTO t1 VALUES (3,'node1_committed_before');
+INSERT INTO t1 VALUES (4,'node1_committed_before');
+INSERT INTO t1 VALUES (5,'node1_committed_before');
COMMIT;
connection node_2;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
+INSERT INTO t1 VALUES (6,'node2_committed_before');
+INSERT INTO t1 VALUES (7,'node2_committed_before');
+INSERT INTO t1 VALUES (8,'node2_committed_before');
+INSERT INTO t1 VALUES (9,'node2_committed_before');
+INSERT INTO t1 VALUES (10,'node2_committed_before');
COMMIT;
Killing server ...
connection node_1;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
+INSERT INTO t1 VALUES (11,'node1_committed_during');
+INSERT INTO t1 VALUES (12,'node1_committed_during');
+INSERT INTO t1 VALUES (13,'node1_committed_during');
+INSERT INTO t1 VALUES (14,'node1_committed_during');
+INSERT INTO t1 VALUES (15,'node1_committed_during');
COMMIT;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (16,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (17,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (18,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (19,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (20,'node1_to_be_committed_after');
connect node_1a_galera_st_kill_slave, 127.0.0.1, root, , test, $NODE_MYPORT_1;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (21,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (22,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (23,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (24,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (25,'node1_to_be_rollbacked_after');
connection node_2;
Performing --wsrep-recover ...
Starting server ...
Using --wsrep-start-position when starting mysqld ...
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
+INSERT INTO t1 VALUES (26,'node2_committed_after');
+INSERT INTO t1 VALUES (27,'node2_committed_after');
+INSERT INTO t1 VALUES (28,'node2_committed_after');
+INSERT INTO t1 VALUES (29,'node2_committed_after');
+INSERT INTO t1 VALUES (30,'node2_committed_after');
COMMIT;
connection node_1;
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (31,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (32,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (33,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (34,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (35,'node1_to_be_committed_after');
COMMIT;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
+INSERT INTO t1 VALUES (36,'node1_committed_after');
+INSERT INTO t1 VALUES (37,'node1_committed_after');
+INSERT INTO t1 VALUES (38,'node1_committed_after');
+INSERT INTO t1 VALUES (39,'node1_committed_after');
+INSERT INTO t1 VALUES (40,'node1_committed_after');
COMMIT;
connection node_1a_galera_st_kill_slave;
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (41,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (42,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (43,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (45,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (46,'node1_to_be_rollbacked_after');
ROLLBACK;
-SELECT COUNT(*) = 35 FROM t1;
-COUNT(*) = 35
-1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_35 FROM t1;
+EXPECT_35
+35
+SELECT * FROM t1;
+id f1
+1 node1_committed_before
+2 node1_committed_before
+3 node1_committed_before
+4 node1_committed_before
+5 node1_committed_before
+6 node2_committed_before
+7 node2_committed_before
+8 node2_committed_before
+9 node2_committed_before
+10 node2_committed_before
+11 node1_committed_during
+12 node1_committed_during
+13 node1_committed_during
+14 node1_committed_during
+15 node1_committed_during
+16 node1_to_be_committed_after
+17 node1_to_be_committed_after
+18 node1_to_be_committed_after
+19 node1_to_be_committed_after
+20 node1_to_be_committed_after
+26 node2_committed_after
+27 node2_committed_after
+28 node2_committed_after
+29 node2_committed_after
+30 node2_committed_after
+31 node1_to_be_committed_after
+32 node1_to_be_committed_after
+33 node1_to_be_committed_after
+34 node1_to_be_committed_after
+35 node1_to_be_committed_after
+36 node1_committed_after
+37 node1_committed_after
+38 node1_committed_after
+39 node1_committed_after
+40 node1_committed_after
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
COUNT(*) = 0
1
COMMIT;
-SET AUTOCOMMIT=ON;
connection node_1;
-SELECT COUNT(*) = 35 FROM t1;
-COUNT(*) = 35
-1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_35 FROM t1;
+EXPECT_35
+35
+SELECT * FROM t1;
+id f1
+1 node1_committed_before
+2 node1_committed_before
+3 node1_committed_before
+4 node1_committed_before
+5 node1_committed_before
+6 node2_committed_before
+7 node2_committed_before
+8 node2_committed_before
+9 node2_committed_before
+10 node2_committed_before
+11 node1_committed_during
+12 node1_committed_during
+13 node1_committed_during
+14 node1_committed_during
+15 node1_committed_during
+16 node1_to_be_committed_after
+17 node1_to_be_committed_after
+18 node1_to_be_committed_after
+19 node1_to_be_committed_after
+20 node1_to_be_committed_after
+26 node2_committed_after
+27 node2_committed_after
+28 node2_committed_after
+29 node2_committed_after
+30 node2_committed_after
+31 node1_to_be_committed_after
+32 node1_to_be_committed_after
+33 node1_to_be_committed_after
+34 node1_to_be_committed_after
+35 node1_to_be_committed_after
+36 node1_committed_after
+37 node1_committed_after
+38 node1_committed_after
+39 node1_committed_after
+40 node1_committed_after
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
COUNT(*) = 0
1
DROP TABLE t1;
COMMIT;
-SET AUTOCOMMIT=ON;
diff --git a/mysql-test/suite/galera/r/galera_ist_mysqldump,debug.rdiff b/mysql-test/suite/galera/r/galera_ist_mysqldump,debug.rdiff
index 141b1ebd25f..e4e255deea2 100644
--- a/mysql-test/suite/galera/r/galera_ist_mysqldump,debug.rdiff
+++ b/mysql-test/suite/galera/r/galera_ist_mysqldump,debug.rdiff
@@ -1,27 +1,27 @@
---- r/galera_ist_mysqldump.result 2018-11-22 14:25:28.551554055 +0200
-+++ r/galera_ist_mysqldump.reject 2018-11-22 15:46:33.119441931 +0200
-@@ -200,6 +200,114 @@
+--- r/galera_ist_mysqldump.result 2021-04-10 14:23:23.158282307 +0300
++++ r/galera_ist_mysqldump,debug.reject 2021-04-10 15:27:13.316299695 +0300
+@@ -354,6 +354,190 @@
+ 1
DROP TABLE t1;
COMMIT;
- SET AUTOCOMMIT=ON;
+Performing State Transfer on a server that has been killed and restarted
+while a DDL was in progress on it
+connection node_1;
-+CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB;
++CREATE TABLE t1 (id int not null primary key,f1 CHAR(255)) ENGINE=InnoDB;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
-+INSERT INTO t1 VALUES ('node1_committed_before');
-+INSERT INTO t1 VALUES ('node1_committed_before');
-+INSERT INTO t1 VALUES ('node1_committed_before');
-+INSERT INTO t1 VALUES ('node1_committed_before');
-+INSERT INTO t1 VALUES ('node1_committed_before');
++INSERT INTO t1 VALUES (1,'node1_committed_before');
++INSERT INTO t1 VALUES (2,'node1_committed_before');
++INSERT INTO t1 VALUES (3,'node1_committed_before');
++INSERT INTO t1 VALUES (4,'node1_committed_before');
++INSERT INTO t1 VALUES (5,'node1_committed_before');
+connection node_2;
+START TRANSACTION;
-+INSERT INTO t1 VALUES ('node2_committed_before');
-+INSERT INTO t1 VALUES ('node2_committed_before');
-+INSERT INTO t1 VALUES ('node2_committed_before');
-+INSERT INTO t1 VALUES ('node2_committed_before');
-+INSERT INTO t1 VALUES ('node2_committed_before');
++INSERT INTO t1 VALUES (6,'node2_committed_before');
++INSERT INTO t1 VALUES (7,'node2_committed_before');
++INSERT INTO t1 VALUES (8,'node2_committed_before');
++INSERT INTO t1 VALUES (9,'node2_committed_before');
++INSERT INTO t1 VALUES (10,'node2_committed_before');
+COMMIT;
+SET GLOBAL debug_dbug = 'd,sync.alter_opened_table';
+connection node_1;
@@ -32,26 +32,26 @@
+connection node_1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
-+INSERT INTO t1 (f1) VALUES ('node1_committed_during');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_during');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_during');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_during');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_during');
++INSERT INTO t1 (id,f1) VALUES (11,'node1_committed_during');
++INSERT INTO t1 (id,f1) VALUES (12,'node1_committed_during');
++INSERT INTO t1 (id,f1) VALUES (13,'node1_committed_during');
++INSERT INTO t1 (id,f1) VALUES (14,'node1_committed_during');
++INSERT INTO t1 (id,f1) VALUES (15,'node1_committed_during');
+COMMIT;
+START TRANSACTION;
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (16,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (17,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (18,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (19,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (20,'node1_to_be_committed_after');
+connect node_1a_galera_st_kill_slave_ddl, 127.0.0.1, root, , test, $NODE_MYPORT_1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (21,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (22,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (23,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (24,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (25,'node1_to_be_rollbacked_after');
+connection node_2;
+Performing --wsrep-recover ...
+connection node_2;
@@ -59,58 +59,134 @@
+Using --wsrep-start-position when starting mysqld ...
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
-+INSERT INTO t1 (f1) VALUES ('node2_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node2_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node2_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node2_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node2_committed_after');
++INSERT INTO t1 (id,f1) VALUES (26,'node2_committed_after');
++INSERT INTO t1 (id,f1) VALUES (27,'node2_committed_after');
++INSERT INTO t1 (id,f1) VALUES (28,'node2_committed_after');
++INSERT INTO t1 (id,f1) VALUES (29,'node2_committed_after');
++INSERT INTO t1 (id,f1) VALUES (30,'node2_committed_after');
+COMMIT;
+connection node_1;
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (31,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (32,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (33,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (34,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (35,'node1_to_be_committed_after');
+COMMIT;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
-+INSERT INTO t1 (f1) VALUES ('node1_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_after');
++INSERT INTO t1 (id,f1) VALUES (36,'node1_committed_after');
++INSERT INTO t1 (id,f1) VALUES (37,'node1_committed_after');
++INSERT INTO t1 (id,f1) VALUES (38,'node1_committed_after');
++INSERT INTO t1 (id,f1) VALUES (39,'node1_committed_after');
++INSERT INTO t1 (id,f1) VALUES (40,'node1_committed_after');
+COMMIT;
+connection node_1a_galera_st_kill_slave_ddl;
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (41,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (42,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (43,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (44,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (45,'node1_to_be_rollbacked_after');
+ROLLBACK;
-+SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1';
-+COUNT(*) = 2
-+1
-+SELECT COUNT(*) = 35 FROM t1;
-+COUNT(*) = 35
-+1
++SET AUTOCOMMIT=ON;
++SET SESSION wsrep_sync_wait=15;
++SELECT COUNT(*) AS EXPECT_3 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1';
++EXPECT_3
++3
++SELECT COUNT(*) AS EXPECT_35 FROM t1;
++EXPECT_35
++35
++SELECT * FROM t1;
++id f1 f2
++1 node1_committed_before NULL
++2 node1_committed_before NULL
++3 node1_committed_before NULL
++4 node1_committed_before NULL
++5 node1_committed_before NULL
++6 node2_committed_before NULL
++7 node2_committed_before NULL
++8 node2_committed_before NULL
++9 node2_committed_before NULL
++10 node2_committed_before NULL
++11 node1_committed_during NULL
++12 node1_committed_during NULL
++13 node1_committed_during NULL
++14 node1_committed_during NULL
++15 node1_committed_during NULL
++16 node1_to_be_committed_after NULL
++17 node1_to_be_committed_after NULL
++18 node1_to_be_committed_after NULL
++19 node1_to_be_committed_after NULL
++20 node1_to_be_committed_after NULL
++26 node2_committed_after NULL
++27 node2_committed_after NULL
++28 node2_committed_after NULL
++29 node2_committed_after NULL
++30 node2_committed_after NULL
++31 node1_to_be_committed_after NULL
++32 node1_to_be_committed_after NULL
++33 node1_to_be_committed_after NULL
++34 node1_to_be_committed_after NULL
++35 node1_to_be_committed_after NULL
++36 node1_committed_after NULL
++37 node1_committed_after NULL
++38 node1_committed_after NULL
++39 node1_committed_after NULL
++40 node1_committed_after NULL
+SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
+COUNT(*) = 0
+1
+COMMIT;
-+SET AUTOCOMMIT=ON;
+connection node_1;
-+SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1';
-+COUNT(*) = 2
-+1
-+SELECT COUNT(*) = 35 FROM t1;
-+COUNT(*) = 35
-+1
++SET AUTOCOMMIT=ON;
++SET SESSION wsrep_sync_wait=15;
++SELECT COUNT(*) AS EXPECT_3 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1';
++EXPECT_3
++3
++SELECT COUNT(*) AS EXPECT_35 FROM t1;
++EXPECT_35
++35
++SELECT * FROM t1;
++id f1 f2
++1 node1_committed_before NULL
++2 node1_committed_before NULL
++3 node1_committed_before NULL
++4 node1_committed_before NULL
++5 node1_committed_before NULL
++6 node2_committed_before NULL
++7 node2_committed_before NULL
++8 node2_committed_before NULL
++9 node2_committed_before NULL
++10 node2_committed_before NULL
++11 node1_committed_during NULL
++12 node1_committed_during NULL
++13 node1_committed_during NULL
++14 node1_committed_during NULL
++15 node1_committed_during NULL
++16 node1_to_be_committed_after NULL
++17 node1_to_be_committed_after NULL
++18 node1_to_be_committed_after NULL
++19 node1_to_be_committed_after NULL
++20 node1_to_be_committed_after NULL
++26 node2_committed_after NULL
++27 node2_committed_after NULL
++28 node2_committed_after NULL
++29 node2_committed_after NULL
++30 node2_committed_after NULL
++31 node1_to_be_committed_after NULL
++32 node1_to_be_committed_after NULL
++33 node1_to_be_committed_after NULL
++34 node1_to_be_committed_after NULL
++35 node1_to_be_committed_after NULL
++36 node1_committed_after NULL
++37 node1_committed_after NULL
++38 node1_committed_after NULL
++39 node1_committed_after NULL
++40 node1_committed_after NULL
+SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
+COUNT(*) = 0
+1
+DROP TABLE t1;
+COMMIT;
-+SET AUTOCOMMIT=ON;
+SET GLOBAL debug_dbug = $debug_orig;
connection node_1;
CALL mtr.add_suppression("Slave SQL: Error 'The MySQL server is running with the --skip-grant-tables option so it cannot execute this statement' on query");
diff --git a/mysql-test/suite/galera/r/galera_ist_mysqldump.result b/mysql-test/suite/galera/r/galera_ist_mysqldump.result
index 222eb7704e8..6c57a571b85 100644
--- a/mysql-test/suite/galera/r/galera_ist_mysqldump.result
+++ b/mysql-test/suite/galera/r/galera_ist_mysqldump.result
@@ -14,194 +14,346 @@ connection node_1;
connection node_2;
Performing State Transfer on a server that has been shut down cleanly and restarted
connection node_1;
-CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB;
+CREATE TABLE t1 (id int not null primary key,f1 CHAR(255)) ENGINE=InnoDB;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
+INSERT INTO t1 VALUES (1,'node1_committed_before');
+INSERT INTO t1 VALUES (2,'node1_committed_before');
+INSERT INTO t1 VALUES (3,'node1_committed_before');
+INSERT INTO t1 VALUES (4,'node1_committed_before');
+INSERT INTO t1 VALUES (5,'node1_committed_before');
COMMIT;
connection node_2;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
+INSERT INTO t1 VALUES (6,'node2_committed_before');
+INSERT INTO t1 VALUES (7,'node2_committed_before');
+INSERT INTO t1 VALUES (8,'node2_committed_before');
+INSERT INTO t1 VALUES (9,'node2_committed_before');
+INSERT INTO t1 VALUES (10,'node2_committed_before');
COMMIT;
Shutting down server ...
connection node_1;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
+INSERT INTO t1 VALUES (11,'node1_committed_during');
+INSERT INTO t1 VALUES (12,'node1_committed_during');
+INSERT INTO t1 VALUES (13,'node1_committed_during');
+INSERT INTO t1 VALUES (14,'node1_committed_during');
+INSERT INTO t1 VALUES (15,'node1_committed_during');
COMMIT;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (16,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (17,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (18,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (19,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (20,'node1_to_be_committed_after');
connect node_1a_galera_st_shutdown_slave, 127.0.0.1, root, , test, $NODE_MYPORT_1;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (21,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (22,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (23,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (24,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (25,'node1_to_be_rollbacked_after');
connection node_2;
Starting server ...
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
+INSERT INTO t1 VALUES (26,'node2_committed_after');
+INSERT INTO t1 VALUES (27,'node2_committed_after');
+INSERT INTO t1 VALUES (28,'node2_committed_after');
+INSERT INTO t1 VALUES (29,'node2_committed_after');
+INSERT INTO t1 VALUES (30,'node2_committed_after');
COMMIT;
connection node_1;
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (31,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (32,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (33,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (34,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (35,'node1_to_be_committed_after');
COMMIT;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
+INSERT INTO t1 VALUES (36,'node1_committed_after');
+INSERT INTO t1 VALUES (37,'node1_committed_after');
+INSERT INTO t1 VALUES (38,'node1_committed_after');
+INSERT INTO t1 VALUES (39,'node1_committed_after');
+INSERT INTO t1 VALUES (40,'node1_committed_after');
COMMIT;
connection node_1a_galera_st_shutdown_slave;
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (41,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (42,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (43,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (44,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (45,'node1_to_be_rollbacked_after');
ROLLBACK;
-SELECT COUNT(*) = 35 FROM t1;
-COUNT(*) = 35
-1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_35 FROM t1;
+EXPECT_35
+35
+SELECT * FROM t1;
+id f1
+1 node1_committed_before
+2 node1_committed_before
+3 node1_committed_before
+4 node1_committed_before
+5 node1_committed_before
+6 node2_committed_before
+7 node2_committed_before
+8 node2_committed_before
+9 node2_committed_before
+10 node2_committed_before
+11 node1_committed_during
+12 node1_committed_during
+13 node1_committed_during
+14 node1_committed_during
+15 node1_committed_during
+16 node1_to_be_committed_after
+17 node1_to_be_committed_after
+18 node1_to_be_committed_after
+19 node1_to_be_committed_after
+20 node1_to_be_committed_after
+26 node2_committed_after
+27 node2_committed_after
+28 node2_committed_after
+29 node2_committed_after
+30 node2_committed_after
+31 node1_to_be_committed_after
+32 node1_to_be_committed_after
+33 node1_to_be_committed_after
+34 node1_to_be_committed_after
+35 node1_to_be_committed_after
+36 node1_committed_after
+37 node1_committed_after
+38 node1_committed_after
+39 node1_committed_after
+40 node1_committed_after
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
COUNT(*) = 0
1
COMMIT;
-SET AUTOCOMMIT=ON;
connection node_1;
-SELECT COUNT(*) = 35 FROM t1;
-COUNT(*) = 35
-1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_35 FROM t1;
+EXPECT_35
+35
+SELECT * FROM t1;
+id f1
+1 node1_committed_before
+2 node1_committed_before
+3 node1_committed_before
+4 node1_committed_before
+5 node1_committed_before
+6 node2_committed_before
+7 node2_committed_before
+8 node2_committed_before
+9 node2_committed_before
+10 node2_committed_before
+11 node1_committed_during
+12 node1_committed_during
+13 node1_committed_during
+14 node1_committed_during
+15 node1_committed_during
+16 node1_to_be_committed_after
+17 node1_to_be_committed_after
+18 node1_to_be_committed_after
+19 node1_to_be_committed_after
+20 node1_to_be_committed_after
+26 node2_committed_after
+27 node2_committed_after
+28 node2_committed_after
+29 node2_committed_after
+30 node2_committed_after
+31 node1_to_be_committed_after
+32 node1_to_be_committed_after
+33 node1_to_be_committed_after
+34 node1_to_be_committed_after
+35 node1_to_be_committed_after
+36 node1_committed_after
+37 node1_committed_after
+38 node1_committed_after
+39 node1_committed_after
+40 node1_committed_after
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
COUNT(*) = 0
1
DROP TABLE t1;
COMMIT;
-SET AUTOCOMMIT=ON;
Performing State Transfer on a server that has been killed and restarted
connection node_1;
-CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB;
+CREATE TABLE t1 (id int not null primary key,f1 CHAR(255)) ENGINE=InnoDB;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
+INSERT INTO t1 VALUES (1,'node1_committed_before');
+INSERT INTO t1 VALUES (2,'node1_committed_before');
+INSERT INTO t1 VALUES (3,'node1_committed_before');
+INSERT INTO t1 VALUES (4,'node1_committed_before');
+INSERT INTO t1 VALUES (5,'node1_committed_before');
COMMIT;
connection node_2;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
+INSERT INTO t1 VALUES (6,'node2_committed_before');
+INSERT INTO t1 VALUES (7,'node2_committed_before');
+INSERT INTO t1 VALUES (8,'node2_committed_before');
+INSERT INTO t1 VALUES (9,'node2_committed_before');
+INSERT INTO t1 VALUES (10,'node2_committed_before');
COMMIT;
Killing server ...
connection node_1;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
+INSERT INTO t1 VALUES (11,'node1_committed_during');
+INSERT INTO t1 VALUES (12,'node1_committed_during');
+INSERT INTO t1 VALUES (13,'node1_committed_during');
+INSERT INTO t1 VALUES (14,'node1_committed_during');
+INSERT INTO t1 VALUES (15,'node1_committed_during');
COMMIT;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (16,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (17,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (18,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (19,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (20,'node1_to_be_committed_after');
connect node_1a_galera_st_kill_slave, 127.0.0.1, root, , test, $NODE_MYPORT_1;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (21,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (22,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (23,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (24,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (25,'node1_to_be_rollbacked_after');
connection node_2;
Performing --wsrep-recover ...
Starting server ...
Using --wsrep-start-position when starting mysqld ...
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
+INSERT INTO t1 VALUES (26,'node2_committed_after');
+INSERT INTO t1 VALUES (27,'node2_committed_after');
+INSERT INTO t1 VALUES (28,'node2_committed_after');
+INSERT INTO t1 VALUES (29,'node2_committed_after');
+INSERT INTO t1 VALUES (30,'node2_committed_after');
COMMIT;
connection node_1;
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (31,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (32,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (33,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (34,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (35,'node1_to_be_committed_after');
COMMIT;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
+INSERT INTO t1 VALUES (36,'node1_committed_after');
+INSERT INTO t1 VALUES (37,'node1_committed_after');
+INSERT INTO t1 VALUES (38,'node1_committed_after');
+INSERT INTO t1 VALUES (39,'node1_committed_after');
+INSERT INTO t1 VALUES (40,'node1_committed_after');
COMMIT;
connection node_1a_galera_st_kill_slave;
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (41,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (42,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (43,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (45,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (46,'node1_to_be_rollbacked_after');
ROLLBACK;
-SELECT COUNT(*) = 35 FROM t1;
-COUNT(*) = 35
-1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_35 FROM t1;
+EXPECT_35
+35
+SELECT * FROM t1;
+id f1
+1 node1_committed_before
+2 node1_committed_before
+3 node1_committed_before
+4 node1_committed_before
+5 node1_committed_before
+6 node2_committed_before
+7 node2_committed_before
+8 node2_committed_before
+9 node2_committed_before
+10 node2_committed_before
+11 node1_committed_during
+12 node1_committed_during
+13 node1_committed_during
+14 node1_committed_during
+15 node1_committed_during
+16 node1_to_be_committed_after
+17 node1_to_be_committed_after
+18 node1_to_be_committed_after
+19 node1_to_be_committed_after
+20 node1_to_be_committed_after
+26 node2_committed_after
+27 node2_committed_after
+28 node2_committed_after
+29 node2_committed_after
+30 node2_committed_after
+31 node1_to_be_committed_after
+32 node1_to_be_committed_after
+33 node1_to_be_committed_after
+34 node1_to_be_committed_after
+35 node1_to_be_committed_after
+36 node1_committed_after
+37 node1_committed_after
+38 node1_committed_after
+39 node1_committed_after
+40 node1_committed_after
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
COUNT(*) = 0
1
COMMIT;
-SET AUTOCOMMIT=ON;
connection node_1;
-SELECT COUNT(*) = 35 FROM t1;
-COUNT(*) = 35
-1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_35 FROM t1;
+EXPECT_35
+35
+SELECT * FROM t1;
+id f1
+1 node1_committed_before
+2 node1_committed_before
+3 node1_committed_before
+4 node1_committed_before
+5 node1_committed_before
+6 node2_committed_before
+7 node2_committed_before
+8 node2_committed_before
+9 node2_committed_before
+10 node2_committed_before
+11 node1_committed_during
+12 node1_committed_during
+13 node1_committed_during
+14 node1_committed_during
+15 node1_committed_during
+16 node1_to_be_committed_after
+17 node1_to_be_committed_after
+18 node1_to_be_committed_after
+19 node1_to_be_committed_after
+20 node1_to_be_committed_after
+26 node2_committed_after
+27 node2_committed_after
+28 node2_committed_after
+29 node2_committed_after
+30 node2_committed_after
+31 node1_to_be_committed_after
+32 node1_to_be_committed_after
+33 node1_to_be_committed_after
+34 node1_to_be_committed_after
+35 node1_to_be_committed_after
+36 node1_committed_after
+37 node1_committed_after
+38 node1_committed_after
+39 node1_committed_after
+40 node1_committed_after
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
COUNT(*) = 0
1
DROP TABLE t1;
COMMIT;
-SET AUTOCOMMIT=ON;
connection node_1;
CALL mtr.add_suppression("Slave SQL: Error 'The MySQL server is running with the --skip-grant-tables option so it cannot execute this statement' on query");
DROP USER sst;
diff --git a/mysql-test/suite/galera/r/galera_ist_restart_joiner.result b/mysql-test/suite/galera/r/galera_ist_restart_joiner.result
index 80d2c90642b..7cb6d90840e 100644
--- a/mysql-test/suite/galera/r/galera_ist_restart_joiner.result
+++ b/mysql-test/suite/galera/r/galera_ist_restart_joiner.result
@@ -5,8 +5,9 @@ connection node_2;
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 CHAR(1));
INSERT INTO t1 VALUES (1, 'a'), (2, 'a'), (3, 'a'), (4, 'a'), (5, 'a'),(6, 'a');
connection node_2;
+SET SESSION wsrep_sync_wait=0;
Unloading wsrep provider ...
-SET GLOBAL wsrep_provider = 'none';
+SET GLOBAL wsrep_cluster_address = '';
connection node_1;
UPDATE t1 SET f2 = 'b' WHERE f1 > 1;
UPDATE t1 SET f2 = 'c' WHERE f1 > 2;
diff --git a/mysql-test/suite/galera/r/galera_ist_rsync,debug.rdiff b/mysql-test/suite/galera/r/galera_ist_rsync,debug.rdiff
index 260f8a8cab8..e76b37838fb 100644
--- a/mysql-test/suite/galera/r/galera_ist_rsync,debug.rdiff
+++ b/mysql-test/suite/galera/r/galera_ist_rsync,debug.rdiff
@@ -1,27 +1,27 @@
---- r/galera_ist_rsync.result 2018-09-11 12:38:42.027479411 +0300
-+++ r/galera_ist_rsync.reject 2018-09-17 10:50:16.527307668 +0300
-@@ -259,3 +259,111 @@
+--- r/galera_ist_rsync.result 2021-04-10 14:24:05.942467091 +0300
++++ r/galera_ist_rsync,debug.reject 2021-04-10 14:52:14.236776538 +0300
+@@ -517,3 +517,187 @@
+ 1
DROP TABLE t1;
COMMIT;
- SET AUTOCOMMIT=ON;
+Performing State Transfer on a server that has been killed and restarted
+while a DDL was in progress on it
+connection node_1;
-+CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB;
++CREATE TABLE t1 (id int not null primary key,f1 CHAR(255)) ENGINE=InnoDB;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
-+INSERT INTO t1 VALUES ('node1_committed_before');
-+INSERT INTO t1 VALUES ('node1_committed_before');
-+INSERT INTO t1 VALUES ('node1_committed_before');
-+INSERT INTO t1 VALUES ('node1_committed_before');
-+INSERT INTO t1 VALUES ('node1_committed_before');
++INSERT INTO t1 VALUES (1,'node1_committed_before');
++INSERT INTO t1 VALUES (2,'node1_committed_before');
++INSERT INTO t1 VALUES (3,'node1_committed_before');
++INSERT INTO t1 VALUES (4,'node1_committed_before');
++INSERT INTO t1 VALUES (5,'node1_committed_before');
+connection node_2;
+START TRANSACTION;
-+INSERT INTO t1 VALUES ('node2_committed_before');
-+INSERT INTO t1 VALUES ('node2_committed_before');
-+INSERT INTO t1 VALUES ('node2_committed_before');
-+INSERT INTO t1 VALUES ('node2_committed_before');
-+INSERT INTO t1 VALUES ('node2_committed_before');
++INSERT INTO t1 VALUES (6,'node2_committed_before');
++INSERT INTO t1 VALUES (7,'node2_committed_before');
++INSERT INTO t1 VALUES (8,'node2_committed_before');
++INSERT INTO t1 VALUES (9,'node2_committed_before');
++INSERT INTO t1 VALUES (10,'node2_committed_before');
+COMMIT;
+SET GLOBAL debug_dbug = 'd,sync.alter_opened_table';
+connection node_1;
@@ -32,26 +32,26 @@
+connection node_1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
-+INSERT INTO t1 (f1) VALUES ('node1_committed_during');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_during');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_during');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_during');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_during');
++INSERT INTO t1 (id,f1) VALUES (11,'node1_committed_during');
++INSERT INTO t1 (id,f1) VALUES (12,'node1_committed_during');
++INSERT INTO t1 (id,f1) VALUES (13,'node1_committed_during');
++INSERT INTO t1 (id,f1) VALUES (14,'node1_committed_during');
++INSERT INTO t1 (id,f1) VALUES (15,'node1_committed_during');
+COMMIT;
+START TRANSACTION;
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (16,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (17,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (18,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (19,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (20,'node1_to_be_committed_after');
+connect node_1a_galera_st_kill_slave_ddl, 127.0.0.1, root, , test, $NODE_MYPORT_1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (21,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (22,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (23,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (24,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (25,'node1_to_be_rollbacked_after');
+connection node_2;
+Performing --wsrep-recover ...
+connection node_2;
@@ -59,56 +59,132 @@
+Using --wsrep-start-position when starting mysqld ...
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
-+INSERT INTO t1 (f1) VALUES ('node2_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node2_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node2_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node2_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node2_committed_after');
++INSERT INTO t1 (id,f1) VALUES (26,'node2_committed_after');
++INSERT INTO t1 (id,f1) VALUES (27,'node2_committed_after');
++INSERT INTO t1 (id,f1) VALUES (28,'node2_committed_after');
++INSERT INTO t1 (id,f1) VALUES (29,'node2_committed_after');
++INSERT INTO t1 (id,f1) VALUES (30,'node2_committed_after');
+COMMIT;
+connection node_1;
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (31,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (32,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (33,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (34,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (35,'node1_to_be_committed_after');
+COMMIT;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
-+INSERT INTO t1 (f1) VALUES ('node1_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_after');
++INSERT INTO t1 (id,f1) VALUES (36,'node1_committed_after');
++INSERT INTO t1 (id,f1) VALUES (37,'node1_committed_after');
++INSERT INTO t1 (id,f1) VALUES (38,'node1_committed_after');
++INSERT INTO t1 (id,f1) VALUES (39,'node1_committed_after');
++INSERT INTO t1 (id,f1) VALUES (40,'node1_committed_after');
+COMMIT;
+connection node_1a_galera_st_kill_slave_ddl;
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (41,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (42,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (43,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (44,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (45,'node1_to_be_rollbacked_after');
+ROLLBACK;
-+SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1';
-+COUNT(*) = 2
-+1
-+SELECT COUNT(*) = 35 FROM t1;
-+COUNT(*) = 35
-+1
++SET AUTOCOMMIT=ON;
++SET SESSION wsrep_sync_wait=15;
++SELECT COUNT(*) AS EXPECT_3 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1';
++EXPECT_3
++3
++SELECT COUNT(*) AS EXPECT_35 FROM t1;
++EXPECT_35
++35
++SELECT * FROM t1;
++id f1 f2
++1 node1_committed_before NULL
++2 node1_committed_before NULL
++3 node1_committed_before NULL
++4 node1_committed_before NULL
++5 node1_committed_before NULL
++6 node2_committed_before NULL
++7 node2_committed_before NULL
++8 node2_committed_before NULL
++9 node2_committed_before NULL
++10 node2_committed_before NULL
++11 node1_committed_during NULL
++12 node1_committed_during NULL
++13 node1_committed_during NULL
++14 node1_committed_during NULL
++15 node1_committed_during NULL
++16 node1_to_be_committed_after NULL
++17 node1_to_be_committed_after NULL
++18 node1_to_be_committed_after NULL
++19 node1_to_be_committed_after NULL
++20 node1_to_be_committed_after NULL
++26 node2_committed_after NULL
++27 node2_committed_after NULL
++28 node2_committed_after NULL
++29 node2_committed_after NULL
++30 node2_committed_after NULL
++31 node1_to_be_committed_after NULL
++32 node1_to_be_committed_after NULL
++33 node1_to_be_committed_after NULL
++34 node1_to_be_committed_after NULL
++35 node1_to_be_committed_after NULL
++36 node1_committed_after NULL
++37 node1_committed_after NULL
++38 node1_committed_after NULL
++39 node1_committed_after NULL
++40 node1_committed_after NULL
+SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
+COUNT(*) = 0
+1
+COMMIT;
-+SET AUTOCOMMIT=ON;
+connection node_1;
-+SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1';
-+COUNT(*) = 2
-+1
-+SELECT COUNT(*) = 35 FROM t1;
-+COUNT(*) = 35
-+1
++SET AUTOCOMMIT=ON;
++SET SESSION wsrep_sync_wait=15;
++SELECT COUNT(*) AS EXPECT_3 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1';
++EXPECT_3
++3
++SELECT COUNT(*) AS EXPECT_35 FROM t1;
++EXPECT_35
++35
++SELECT * FROM t1;
++id f1 f2
++1 node1_committed_before NULL
++2 node1_committed_before NULL
++3 node1_committed_before NULL
++4 node1_committed_before NULL
++5 node1_committed_before NULL
++6 node2_committed_before NULL
++7 node2_committed_before NULL
++8 node2_committed_before NULL
++9 node2_committed_before NULL
++10 node2_committed_before NULL
++11 node1_committed_during NULL
++12 node1_committed_during NULL
++13 node1_committed_during NULL
++14 node1_committed_during NULL
++15 node1_committed_during NULL
++16 node1_to_be_committed_after NULL
++17 node1_to_be_committed_after NULL
++18 node1_to_be_committed_after NULL
++19 node1_to_be_committed_after NULL
++20 node1_to_be_committed_after NULL
++26 node2_committed_after NULL
++27 node2_committed_after NULL
++28 node2_committed_after NULL
++29 node2_committed_after NULL
++30 node2_committed_after NULL
++31 node1_to_be_committed_after NULL
++32 node1_to_be_committed_after NULL
++33 node1_to_be_committed_after NULL
++34 node1_to_be_committed_after NULL
++35 node1_to_be_committed_after NULL
++36 node1_committed_after NULL
++37 node1_committed_after NULL
++38 node1_committed_after NULL
++39 node1_committed_after NULL
++40 node1_committed_after NULL
+SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
+COUNT(*) = 0
+1
+DROP TABLE t1;
+COMMIT;
-+SET AUTOCOMMIT=ON;
+SET GLOBAL debug_dbug = $debug_orig;
diff --git a/mysql-test/suite/galera/r/galera_ist_rsync.result b/mysql-test/suite/galera/r/galera_ist_rsync.result
index 13f7d898a59..5a71b490a80 100644
--- a/mysql-test/suite/galera/r/galera_ist_rsync.result
+++ b/mysql-test/suite/galera/r/galera_ist_rsync.result
@@ -4,49 +4,49 @@ connection node_1;
connection node_2;
Performing State Transfer on a server that has been temporarily disconnected
connection node_1;
-CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB;
+CREATE TABLE t1 (id int not null primary key,f1 CHAR(255)) ENGINE=InnoDB;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
+INSERT INTO t1 VALUES (1,'node1_committed_before');
+INSERT INTO t1 VALUES (2,'node1_committed_before');
+INSERT INTO t1 VALUES (3,'node1_committed_before');
+INSERT INTO t1 VALUES (4,'node1_committed_before');
+INSERT INTO t1 VALUES (5,'node1_committed_before');
COMMIT;
connection node_2;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
+INSERT INTO t1 VALUES (6,'node2_committed_before');
+INSERT INTO t1 VALUES (7,'node2_committed_before');
+INSERT INTO t1 VALUES (8,'node2_committed_before');
+INSERT INTO t1 VALUES (9,'node2_committed_before');
+INSERT INTO t1 VALUES (10,'node2_committed_before');
COMMIT;
Unloading wsrep provider ...
-SET GLOBAL wsrep_provider = 'none';
+SET GLOBAL wsrep_cluster_address = '';
connection node_1;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
+INSERT INTO t1 VALUES (11,'node1_committed_during');
+INSERT INTO t1 VALUES (12,'node1_committed_during');
+INSERT INTO t1 VALUES (13,'node1_committed_during');
+INSERT INTO t1 VALUES (14,'node1_committed_during');
+INSERT INTO t1 VALUES (15,'node1_committed_during');
COMMIT;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (16,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (17,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (18,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (19,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (20,'node1_to_be_committed_after');
connect node_1a_galera_st_disconnect_slave, 127.0.0.1, root, , test, $NODE_MYPORT_1;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (21,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (22,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (23,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (24,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (25,'node1_to_be_rollbacked_after');
connection node_2;
Loading wsrep provider ...
disconnect node_2;
@@ -54,239 +54,466 @@ connect node_2, 127.0.0.1, root, , test, $NODE_MYPORT_2;
connection node_2;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
+INSERT INTO t1 VALUES (26,'node2_committed_after');
+INSERT INTO t1 VALUES (27,'node2_committed_after');
+INSERT INTO t1 VALUES (28,'node2_committed_after');
+INSERT INTO t1 VALUES (29,'node2_committed_after');
+INSERT INTO t1 VALUES (30,'node2_committed_after');
COMMIT;
connection node_1;
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (31,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (32,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (33,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (34,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (35,'node1_to_be_committed_after');
COMMIT;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
+INSERT INTO t1 VALUES (36,'node1_committed_after');
+INSERT INTO t1 VALUES (37,'node1_committed_after');
+INSERT INTO t1 VALUES (38,'node1_committed_after');
+INSERT INTO t1 VALUES (39,'node1_committed_after');
+INSERT INTO t1 VALUES (40,'node1_committed_after');
COMMIT;
connection node_1a_galera_st_disconnect_slave;
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (41,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (42,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (43,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (44,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (45,'node1_to_be_rollbacked_after');
ROLLBACK;
-SELECT COUNT(*) = 35 FROM t1;
-COUNT(*) = 35
-1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_35 FROM t1;
+EXPECT_35
+35
+SELECT * FROM t1;
+id f1
+1 node1_committed_before
+2 node1_committed_before
+3 node1_committed_before
+4 node1_committed_before
+5 node1_committed_before
+6 node2_committed_before
+7 node2_committed_before
+8 node2_committed_before
+9 node2_committed_before
+10 node2_committed_before
+11 node1_committed_during
+12 node1_committed_during
+13 node1_committed_during
+14 node1_committed_during
+15 node1_committed_during
+16 node1_to_be_committed_after
+17 node1_to_be_committed_after
+18 node1_to_be_committed_after
+19 node1_to_be_committed_after
+20 node1_to_be_committed_after
+26 node2_committed_after
+27 node2_committed_after
+28 node2_committed_after
+29 node2_committed_after
+30 node2_committed_after
+31 node1_to_be_committed_after
+32 node1_to_be_committed_after
+33 node1_to_be_committed_after
+34 node1_to_be_committed_after
+35 node1_to_be_committed_after
+36 node1_committed_after
+37 node1_committed_after
+38 node1_committed_after
+39 node1_committed_after
+40 node1_committed_after
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
COUNT(*) = 0
1
-COMMIT;
-SET AUTOCOMMIT=ON;
connection node_1;
-SELECT COUNT(*) = 35 FROM t1;
-COUNT(*) = 35
-1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_35 FROM t1;
+EXPECT_35
+35
+SELECT * FROM t1;
+id f1
+1 node1_committed_before
+2 node1_committed_before
+3 node1_committed_before
+4 node1_committed_before
+5 node1_committed_before
+6 node2_committed_before
+7 node2_committed_before
+8 node2_committed_before
+9 node2_committed_before
+10 node2_committed_before
+11 node1_committed_during
+12 node1_committed_during
+13 node1_committed_during
+14 node1_committed_during
+15 node1_committed_during
+16 node1_to_be_committed_after
+17 node1_to_be_committed_after
+18 node1_to_be_committed_after
+19 node1_to_be_committed_after
+20 node1_to_be_committed_after
+26 node2_committed_after
+27 node2_committed_after
+28 node2_committed_after
+29 node2_committed_after
+30 node2_committed_after
+31 node1_to_be_committed_after
+32 node1_to_be_committed_after
+33 node1_to_be_committed_after
+34 node1_to_be_committed_after
+35 node1_to_be_committed_after
+36 node1_committed_after
+37 node1_committed_after
+38 node1_committed_after
+39 node1_committed_after
+40 node1_committed_after
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
COUNT(*) = 0
1
DROP TABLE t1;
COMMIT;
-SET AUTOCOMMIT=ON;
Performing State Transfer on a server that has been shut down cleanly and restarted
connection node_1;
-CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB;
+CREATE TABLE t1 (id int not null primary key,f1 CHAR(255)) ENGINE=InnoDB;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
+INSERT INTO t1 VALUES (1,'node1_committed_before');
+INSERT INTO t1 VALUES (2,'node1_committed_before');
+INSERT INTO t1 VALUES (3,'node1_committed_before');
+INSERT INTO t1 VALUES (4,'node1_committed_before');
+INSERT INTO t1 VALUES (5,'node1_committed_before');
COMMIT;
connection node_2;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
+INSERT INTO t1 VALUES (6,'node2_committed_before');
+INSERT INTO t1 VALUES (7,'node2_committed_before');
+INSERT INTO t1 VALUES (8,'node2_committed_before');
+INSERT INTO t1 VALUES (9,'node2_committed_before');
+INSERT INTO t1 VALUES (10,'node2_committed_before');
COMMIT;
Shutting down server ...
connection node_1;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
+INSERT INTO t1 VALUES (11,'node1_committed_during');
+INSERT INTO t1 VALUES (12,'node1_committed_during');
+INSERT INTO t1 VALUES (13,'node1_committed_during');
+INSERT INTO t1 VALUES (14,'node1_committed_during');
+INSERT INTO t1 VALUES (15,'node1_committed_during');
COMMIT;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (16,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (17,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (18,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (19,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (20,'node1_to_be_committed_after');
connect node_1a_galera_st_shutdown_slave, 127.0.0.1, root, , test, $NODE_MYPORT_1;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (21,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (22,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (23,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (24,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (25,'node1_to_be_rollbacked_after');
connection node_2;
Starting server ...
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
+INSERT INTO t1 VALUES (26,'node2_committed_after');
+INSERT INTO t1 VALUES (27,'node2_committed_after');
+INSERT INTO t1 VALUES (28,'node2_committed_after');
+INSERT INTO t1 VALUES (29,'node2_committed_after');
+INSERT INTO t1 VALUES (30,'node2_committed_after');
COMMIT;
connection node_1;
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (31,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (32,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (33,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (34,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (35,'node1_to_be_committed_after');
COMMIT;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
+INSERT INTO t1 VALUES (36,'node1_committed_after');
+INSERT INTO t1 VALUES (37,'node1_committed_after');
+INSERT INTO t1 VALUES (38,'node1_committed_after');
+INSERT INTO t1 VALUES (39,'node1_committed_after');
+INSERT INTO t1 VALUES (40,'node1_committed_after');
COMMIT;
connection node_1a_galera_st_shutdown_slave;
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (41,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (42,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (43,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (44,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (45,'node1_to_be_rollbacked_after');
ROLLBACK;
-SELECT COUNT(*) = 35 FROM t1;
-COUNT(*) = 35
-1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_15 FROM t1;
+EXPECT_15
+35
+SELECT * from t1;
+id f1
+1 node1_committed_before
+2 node1_committed_before
+3 node1_committed_before
+4 node1_committed_before
+5 node1_committed_before
+6 node2_committed_before
+7 node2_committed_before
+8 node2_committed_before
+9 node2_committed_before
+10 node2_committed_before
+11 node1_committed_during
+12 node1_committed_during
+13 node1_committed_during
+14 node1_committed_during
+15 node1_committed_during
+16 node1_to_be_committed_after
+17 node1_to_be_committed_after
+18 node1_to_be_committed_after
+19 node1_to_be_committed_after
+20 node1_to_be_committed_after
+26 node2_committed_after
+27 node2_committed_after
+28 node2_committed_after
+29 node2_committed_after
+30 node2_committed_after
+31 node1_to_be_committed_after
+32 node1_to_be_committed_after
+33 node1_to_be_committed_after
+34 node1_to_be_committed_after
+35 node1_to_be_committed_after
+36 node1_committed_after
+37 node1_committed_after
+38 node1_committed_after
+39 node1_committed_after
+40 node1_committed_after
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
COUNT(*) = 0
1
COMMIT;
-SET AUTOCOMMIT=ON;
connection node_1;
-SELECT COUNT(*) = 35 FROM t1;
-COUNT(*) = 35
-1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_15 FROM t1;
+EXPECT_15
+35
+SELECT * from t1;
+id f1
+1 node1_committed_before
+2 node1_committed_before
+3 node1_committed_before
+4 node1_committed_before
+5 node1_committed_before
+6 node2_committed_before
+7 node2_committed_before
+8 node2_committed_before
+9 node2_committed_before
+10 node2_committed_before
+11 node1_committed_during
+12 node1_committed_during
+13 node1_committed_during
+14 node1_committed_during
+15 node1_committed_during
+16 node1_to_be_committed_after
+17 node1_to_be_committed_after
+18 node1_to_be_committed_after
+19 node1_to_be_committed_after
+20 node1_to_be_committed_after
+26 node2_committed_after
+27 node2_committed_after
+28 node2_committed_after
+29 node2_committed_after
+30 node2_committed_after
+31 node1_to_be_committed_after
+32 node1_to_be_committed_after
+33 node1_to_be_committed_after
+34 node1_to_be_committed_after
+35 node1_to_be_committed_after
+36 node1_committed_after
+37 node1_committed_after
+38 node1_committed_after
+39 node1_committed_after
+40 node1_committed_after
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
COUNT(*) = 0
1
DROP TABLE t1;
COMMIT;
-SET AUTOCOMMIT=ON;
Performing State Transfer on a server that has been killed and restarted
connection node_1;
-CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB;
+CREATE TABLE t1 (id int not null primary key,f1 CHAR(255)) ENGINE=InnoDB;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
+INSERT INTO t1 VALUES (1,'node1_committed_before');
+INSERT INTO t1 VALUES (2,'node1_committed_before');
+INSERT INTO t1 VALUES (3,'node1_committed_before');
+INSERT INTO t1 VALUES (4,'node1_committed_before');
+INSERT INTO t1 VALUES (5,'node1_committed_before');
COMMIT;
connection node_2;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
+INSERT INTO t1 VALUES (6,'node2_committed_before');
+INSERT INTO t1 VALUES (7,'node2_committed_before');
+INSERT INTO t1 VALUES (8,'node2_committed_before');
+INSERT INTO t1 VALUES (9,'node2_committed_before');
+INSERT INTO t1 VALUES (10,'node2_committed_before');
COMMIT;
Killing server ...
connection node_1;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
+INSERT INTO t1 VALUES (11,'node1_committed_during');
+INSERT INTO t1 VALUES (12,'node1_committed_during');
+INSERT INTO t1 VALUES (13,'node1_committed_during');
+INSERT INTO t1 VALUES (14,'node1_committed_during');
+INSERT INTO t1 VALUES (15,'node1_committed_during');
COMMIT;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (16,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (17,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (18,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (19,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (20,'node1_to_be_committed_after');
connect node_1a_galera_st_kill_slave, 127.0.0.1, root, , test, $NODE_MYPORT_1;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (21,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (22,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (23,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (24,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (25,'node1_to_be_rollbacked_after');
connection node_2;
Performing --wsrep-recover ...
Starting server ...
Using --wsrep-start-position when starting mysqld ...
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
+INSERT INTO t1 VALUES (26,'node2_committed_after');
+INSERT INTO t1 VALUES (27,'node2_committed_after');
+INSERT INTO t1 VALUES (28,'node2_committed_after');
+INSERT INTO t1 VALUES (29,'node2_committed_after');
+INSERT INTO t1 VALUES (30,'node2_committed_after');
COMMIT;
connection node_1;
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (31,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (32,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (33,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (34,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (35,'node1_to_be_committed_after');
COMMIT;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
+INSERT INTO t1 VALUES (36,'node1_committed_after');
+INSERT INTO t1 VALUES (37,'node1_committed_after');
+INSERT INTO t1 VALUES (38,'node1_committed_after');
+INSERT INTO t1 VALUES (39,'node1_committed_after');
+INSERT INTO t1 VALUES (40,'node1_committed_after');
COMMIT;
connection node_1a_galera_st_kill_slave;
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (41,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (42,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (43,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (45,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (46,'node1_to_be_rollbacked_after');
ROLLBACK;
-SELECT COUNT(*) = 35 FROM t1;
-COUNT(*) = 35
-1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_35 FROM t1;
+EXPECT_35
+35
+SELECT * FROM t1;
+id f1
+1 node1_committed_before
+2 node1_committed_before
+3 node1_committed_before
+4 node1_committed_before
+5 node1_committed_before
+6 node2_committed_before
+7 node2_committed_before
+8 node2_committed_before
+9 node2_committed_before
+10 node2_committed_before
+11 node1_committed_during
+12 node1_committed_during
+13 node1_committed_during
+14 node1_committed_during
+15 node1_committed_during
+16 node1_to_be_committed_after
+17 node1_to_be_committed_after
+18 node1_to_be_committed_after
+19 node1_to_be_committed_after
+20 node1_to_be_committed_after
+26 node2_committed_after
+27 node2_committed_after
+28 node2_committed_after
+29 node2_committed_after
+30 node2_committed_after
+31 node1_to_be_committed_after
+32 node1_to_be_committed_after
+33 node1_to_be_committed_after
+34 node1_to_be_committed_after
+35 node1_to_be_committed_after
+36 node1_committed_after
+37 node1_committed_after
+38 node1_committed_after
+39 node1_committed_after
+40 node1_committed_after
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
COUNT(*) = 0
1
COMMIT;
-SET AUTOCOMMIT=ON;
connection node_1;
-SELECT COUNT(*) = 35 FROM t1;
-COUNT(*) = 35
-1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_35 FROM t1;
+EXPECT_35
+35
+SELECT * FROM t1;
+id f1
+1 node1_committed_before
+2 node1_committed_before
+3 node1_committed_before
+4 node1_committed_before
+5 node1_committed_before
+6 node2_committed_before
+7 node2_committed_before
+8 node2_committed_before
+9 node2_committed_before
+10 node2_committed_before
+11 node1_committed_during
+12 node1_committed_during
+13 node1_committed_during
+14 node1_committed_during
+15 node1_committed_during
+16 node1_to_be_committed_after
+17 node1_to_be_committed_after
+18 node1_to_be_committed_after
+19 node1_to_be_committed_after
+20 node1_to_be_committed_after
+26 node2_committed_after
+27 node2_committed_after
+28 node2_committed_after
+29 node2_committed_after
+30 node2_committed_after
+31 node1_to_be_committed_after
+32 node1_to_be_committed_after
+33 node1_to_be_committed_after
+34 node1_to_be_committed_after
+35 node1_to_be_committed_after
+36 node1_committed_after
+37 node1_committed_after
+38 node1_committed_after
+39 node1_committed_after
+40 node1_committed_after
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
COUNT(*) = 0
1
DROP TABLE t1;
COMMIT;
-SET AUTOCOMMIT=ON;
diff --git a/mysql-test/suite/galera/r/galera_kill_nochanges.result b/mysql-test/suite/galera/r/galera_kill_nochanges.result
index 8795608a303..f7478e29fd8 100644
--- a/mysql-test/suite/galera/r/galera_kill_nochanges.result
+++ b/mysql-test/suite/galera/r/galera_kill_nochanges.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
connection node_2;
connection node_1;
diff --git a/mysql-test/suite/galera/r/galera_log_bin_opt.result b/mysql-test/suite/galera/r/galera_log_bin_opt.result
new file mode 100644
index 00000000000..160575df412
--- /dev/null
+++ b/mysql-test/suite/galera/r/galera_log_bin_opt.result
@@ -0,0 +1,80 @@
+connection node_2;
+connection node_1;
+connection node_1;
+set global wsrep_on=OFF;
+reset master;
+set global wsrep_on=ON;
+connection node_2;
+set global wsrep_on=OFF;
+reset master;
+set global wsrep_on=ON;
+CREATE TABLE t1 (id INT PRIMARY KEY) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1);
+CREATE TABLE t2 (id INT) ENGINE=InnoDB;
+INSERT INTO t2 VALUES (1);
+INSERT INTO t2 VALUES (1);
+connection node_2;
+SELECT COUNT(*) = 1 FROM t1;
+COUNT(*) = 1
+1
+SELECT COUNT(*) = 2 FROM t2;
+COUNT(*) = 2
+1
+connection node_1;
+ALTER TABLE t1 ADD COLUMN f2 INTEGER;
+include/show_binlog_events.inc
+Log_name Pos Event_type Server_id End_log_pos Info
+mysqld-bin.000001 # Gtid # # GTID #-#-#
+mysqld-bin.000001 # Query # # use `test`; CREATE TABLE t1 (id INT PRIMARY KEY) ENGINE=InnoDB
+mysqld-bin.000001 # Gtid # # BEGIN GTID #-#-#
+mysqld-bin.000001 # Annotate_rows # # INSERT INTO t1 VALUES (1)
+mysqld-bin.000001 # Table_map # # table_id: # (test.t1)
+mysqld-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F
+mysqld-bin.000001 # Xid # # COMMIT /* XID */
+mysqld-bin.000001 # Gtid # # GTID #-#-#
+mysqld-bin.000001 # Query # # use `test`; CREATE TABLE t2 (id INT) ENGINE=InnoDB
+mysqld-bin.000001 # Gtid # # BEGIN GTID #-#-#
+mysqld-bin.000001 # Annotate_rows # # INSERT INTO t2 VALUES (1)
+mysqld-bin.000001 # Table_map # # table_id: # (test.t2)
+mysqld-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F
+mysqld-bin.000001 # Xid # # COMMIT /* XID */
+mysqld-bin.000001 # Gtid # # BEGIN GTID #-#-#
+mysqld-bin.000001 # Annotate_rows # # INSERT INTO t2 VALUES (1)
+mysqld-bin.000001 # Table_map # # table_id: # (test.t2)
+mysqld-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F
+mysqld-bin.000001 # Xid # # COMMIT /* XID */
+mysqld-bin.000001 # Gtid # # GTID #-#-#
+mysqld-bin.000001 # Query # # use `test`; ALTER TABLE t1 ADD COLUMN f2 INTEGER
+connection node_2;
+SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1';
+COUNT(*) = 2
+1
+include/show_binlog_events.inc
+Log_name Pos Event_type Server_id End_log_pos Info
+mysqld-bin.000001 # Gtid # # GTID #-#-#
+mysqld-bin.000001 # Query # # use `test`; CREATE TABLE t1 (id INT PRIMARY KEY) ENGINE=InnoDB
+mysqld-bin.000001 # Gtid # # BEGIN GTID #-#-#
+mysqld-bin.000001 # Annotate_rows # # INSERT INTO t1 VALUES (1)
+mysqld-bin.000001 # Table_map # # table_id: # (test.t1)
+mysqld-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F
+mysqld-bin.000001 # Xid # # COMMIT /* XID */
+mysqld-bin.000001 # Gtid # # GTID #-#-#
+mysqld-bin.000001 # Query # # use `test`; CREATE TABLE t2 (id INT) ENGINE=InnoDB
+mysqld-bin.000001 # Gtid # # BEGIN GTID #-#-#
+mysqld-bin.000001 # Annotate_rows # # INSERT INTO t2 VALUES (1)
+mysqld-bin.000001 # Table_map # # table_id: # (test.t2)
+mysqld-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F
+mysqld-bin.000001 # Xid # # COMMIT /* XID */
+mysqld-bin.000001 # Gtid # # BEGIN GTID #-#-#
+mysqld-bin.000001 # Annotate_rows # # INSERT INTO t2 VALUES (1)
+mysqld-bin.000001 # Table_map # # table_id: # (test.t2)
+mysqld-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F
+mysqld-bin.000001 # Xid # # COMMIT /* XID */
+mysqld-bin.000001 # Gtid # # GTID #-#-#
+mysqld-bin.000001 # Query # # use `test`; ALTER TABLE t1 ADD COLUMN f2 INTEGER
+DROP TABLE t1;
+DROP TABLE t2;
+#cleanup
+connection node_1;
+SET GLOBAL wsrep_on=OFF;
+RESET MASTER;
diff --git a/mysql-test/suite/galera/r/galera_split_brain.result b/mysql-test/suite/galera/r/galera_split_brain.result
index 7c669e5516a..bd8c3a5bc44 100644
--- a/mysql-test/suite/galera/r/galera_split_brain.result
+++ b/mysql-test/suite/galera/r/galera_split_brain.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
connection node_2;
call mtr.add_suppression("WSREP: TO isolation failed for: ");
@@ -6,7 +8,7 @@ connection node_2;
Killing server ...
connection node_1;
CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
-ERROR 40001: WSREP replication failed. Check your wsrep connection state and retry the query.
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
connection node_2;
connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2;
disconnect node_2;
diff --git a/mysql-test/suite/galera/r/galera_ssl_upgrade.result b/mysql-test/suite/galera/r/galera_ssl_upgrade.result
index b24671d120d..8aab135c6a2 100644
--- a/mysql-test/suite/galera/r/galera_ssl_upgrade.result
+++ b/mysql-test/suite/galera/r/galera_ssl_upgrade.result
@@ -1,15 +1,25 @@
+call mtr.add_suppression("WSREP: write_handler(): protocol is shutdown");
SELECT VARIABLE_VALUE = 'Synced' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_state_comment';
VARIABLE_VALUE = 'Synced'
1
SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
VARIABLE_VALUE = 2
1
+connection node_1;
+call mtr.add_suppression("WSREP: write_handler(): protocol is shutdown.*");
+connection node_2;
+call mtr.add_suppression("WSREP: write_handler(): protocol is shutdown.*");
+connection node_1;
+connection node_2;
+connection node_1;
SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
VARIABLE_VALUE = 2
1
+connection node_2;
SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
VARIABLE_VALUE = 2
1
+connection node_1;
SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
VARIABLE_VALUE = 2
1
diff --git a/mysql-test/suite/galera/r/galera_sst_mariabackup,debug.rdiff b/mysql-test/suite/galera/r/galera_sst_mariabackup,debug.rdiff
index 819bcba7cac..bad8355b514 100644
--- a/mysql-test/suite/galera/r/galera_sst_mariabackup,debug.rdiff
+++ b/mysql-test/suite/galera/r/galera_sst_mariabackup,debug.rdiff
@@ -1,27 +1,27 @@
---- r/galera_sst_mariabackup.result 2018-11-21 16:50:35.766982279 +0200
-+++ r/galera_sst_mariabackup.reject 2018-11-22 09:20:10.344408266 +0200
-@@ -286,5 +286,113 @@
+--- r/galera_sst_mariabackup.result 2021-04-10 14:25:04.142716409 +0300
++++ r/galera_sst_mariabackup,debug.reject 2021-04-10 14:53:30.033162191 +0300
+@@ -516,5 +516,189 @@
+ 1
DROP TABLE t1;
COMMIT;
- SET AUTOCOMMIT=ON;
+Performing State Transfer on a server that has been killed and restarted
+while a DDL was in progress on it
+connection node_1;
-+CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB;
++CREATE TABLE t1 (id int not null primary key,f1 CHAR(255)) ENGINE=InnoDB;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
-+INSERT INTO t1 VALUES ('node1_committed_before');
-+INSERT INTO t1 VALUES ('node1_committed_before');
-+INSERT INTO t1 VALUES ('node1_committed_before');
-+INSERT INTO t1 VALUES ('node1_committed_before');
-+INSERT INTO t1 VALUES ('node1_committed_before');
++INSERT INTO t1 VALUES (1,'node1_committed_before');
++INSERT INTO t1 VALUES (2,'node1_committed_before');
++INSERT INTO t1 VALUES (3,'node1_committed_before');
++INSERT INTO t1 VALUES (4,'node1_committed_before');
++INSERT INTO t1 VALUES (5,'node1_committed_before');
+connection node_2;
+START TRANSACTION;
-+INSERT INTO t1 VALUES ('node2_committed_before');
-+INSERT INTO t1 VALUES ('node2_committed_before');
-+INSERT INTO t1 VALUES ('node2_committed_before');
-+INSERT INTO t1 VALUES ('node2_committed_before');
-+INSERT INTO t1 VALUES ('node2_committed_before');
++INSERT INTO t1 VALUES (6,'node2_committed_before');
++INSERT INTO t1 VALUES (7,'node2_committed_before');
++INSERT INTO t1 VALUES (8,'node2_committed_before');
++INSERT INTO t1 VALUES (9,'node2_committed_before');
++INSERT INTO t1 VALUES (10,'node2_committed_before');
+COMMIT;
+SET GLOBAL debug_dbug = 'd,sync.alter_opened_table';
+connection node_1;
@@ -32,26 +32,26 @@
+connection node_1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
-+INSERT INTO t1 (f1) VALUES ('node1_committed_during');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_during');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_during');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_during');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_during');
++INSERT INTO t1 (id,f1) VALUES (11,'node1_committed_during');
++INSERT INTO t1 (id,f1) VALUES (12,'node1_committed_during');
++INSERT INTO t1 (id,f1) VALUES (13,'node1_committed_during');
++INSERT INTO t1 (id,f1) VALUES (14,'node1_committed_during');
++INSERT INTO t1 (id,f1) VALUES (15,'node1_committed_during');
+COMMIT;
+START TRANSACTION;
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (16,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (17,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (18,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (19,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (20,'node1_to_be_committed_after');
+connect node_1a_galera_st_kill_slave_ddl, 127.0.0.1, root, , test, $NODE_MYPORT_1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (21,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (22,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (23,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (24,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (25,'node1_to_be_rollbacked_after');
+connection node_2;
+Performing --wsrep-recover ...
+connection node_2;
@@ -59,58 +59,134 @@
+Using --wsrep-start-position when starting mysqld ...
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
-+INSERT INTO t1 (f1) VALUES ('node2_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node2_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node2_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node2_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node2_committed_after');
++INSERT INTO t1 (id,f1) VALUES (26,'node2_committed_after');
++INSERT INTO t1 (id,f1) VALUES (27,'node2_committed_after');
++INSERT INTO t1 (id,f1) VALUES (28,'node2_committed_after');
++INSERT INTO t1 (id,f1) VALUES (29,'node2_committed_after');
++INSERT INTO t1 (id,f1) VALUES (30,'node2_committed_after');
+COMMIT;
+connection node_1;
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (31,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (32,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (33,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (34,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (35,'node1_to_be_committed_after');
+COMMIT;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
-+INSERT INTO t1 (f1) VALUES ('node1_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_after');
++INSERT INTO t1 (id,f1) VALUES (36,'node1_committed_after');
++INSERT INTO t1 (id,f1) VALUES (37,'node1_committed_after');
++INSERT INTO t1 (id,f1) VALUES (38,'node1_committed_after');
++INSERT INTO t1 (id,f1) VALUES (39,'node1_committed_after');
++INSERT INTO t1 (id,f1) VALUES (40,'node1_committed_after');
+COMMIT;
+connection node_1a_galera_st_kill_slave_ddl;
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (41,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (42,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (43,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (44,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (45,'node1_to_be_rollbacked_after');
+ROLLBACK;
-+SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1';
-+COUNT(*) = 2
-+1
-+SELECT COUNT(*) = 35 FROM t1;
-+COUNT(*) = 35
-+1
++SET AUTOCOMMIT=ON;
++SET SESSION wsrep_sync_wait=15;
++SELECT COUNT(*) AS EXPECT_3 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1';
++EXPECT_3
++3
++SELECT COUNT(*) AS EXPECT_35 FROM t1;
++EXPECT_35
++35
++SELECT * FROM t1;
++id f1 f2
++1 node1_committed_before NULL
++2 node1_committed_before NULL
++3 node1_committed_before NULL
++4 node1_committed_before NULL
++5 node1_committed_before NULL
++6 node2_committed_before NULL
++7 node2_committed_before NULL
++8 node2_committed_before NULL
++9 node2_committed_before NULL
++10 node2_committed_before NULL
++11 node1_committed_during NULL
++12 node1_committed_during NULL
++13 node1_committed_during NULL
++14 node1_committed_during NULL
++15 node1_committed_during NULL
++16 node1_to_be_committed_after NULL
++17 node1_to_be_committed_after NULL
++18 node1_to_be_committed_after NULL
++19 node1_to_be_committed_after NULL
++20 node1_to_be_committed_after NULL
++26 node2_committed_after NULL
++27 node2_committed_after NULL
++28 node2_committed_after NULL
++29 node2_committed_after NULL
++30 node2_committed_after NULL
++31 node1_to_be_committed_after NULL
++32 node1_to_be_committed_after NULL
++33 node1_to_be_committed_after NULL
++34 node1_to_be_committed_after NULL
++35 node1_to_be_committed_after NULL
++36 node1_committed_after NULL
++37 node1_committed_after NULL
++38 node1_committed_after NULL
++39 node1_committed_after NULL
++40 node1_committed_after NULL
+SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
+COUNT(*) = 0
+1
+COMMIT;
-+SET AUTOCOMMIT=ON;
+connection node_1;
-+SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1';
-+COUNT(*) = 2
-+1
-+SELECT COUNT(*) = 35 FROM t1;
-+COUNT(*) = 35
-+1
++SET AUTOCOMMIT=ON;
++SET SESSION wsrep_sync_wait=15;
++SELECT COUNT(*) AS EXPECT_3 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1';
++EXPECT_3
++3
++SELECT COUNT(*) AS EXPECT_35 FROM t1;
++EXPECT_35
++35
++SELECT * FROM t1;
++id f1 f2
++1 node1_committed_before NULL
++2 node1_committed_before NULL
++3 node1_committed_before NULL
++4 node1_committed_before NULL
++5 node1_committed_before NULL
++6 node2_committed_before NULL
++7 node2_committed_before NULL
++8 node2_committed_before NULL
++9 node2_committed_before NULL
++10 node2_committed_before NULL
++11 node1_committed_during NULL
++12 node1_committed_during NULL
++13 node1_committed_during NULL
++14 node1_committed_during NULL
++15 node1_committed_during NULL
++16 node1_to_be_committed_after NULL
++17 node1_to_be_committed_after NULL
++18 node1_to_be_committed_after NULL
++19 node1_to_be_committed_after NULL
++20 node1_to_be_committed_after NULL
++26 node2_committed_after NULL
++27 node2_committed_after NULL
++28 node2_committed_after NULL
++29 node2_committed_after NULL
++30 node2_committed_after NULL
++31 node1_to_be_committed_after NULL
++32 node1_to_be_committed_after NULL
++33 node1_to_be_committed_after NULL
++34 node1_to_be_committed_after NULL
++35 node1_to_be_committed_after NULL
++36 node1_committed_after NULL
++37 node1_committed_after NULL
++38 node1_committed_after NULL
++39 node1_committed_after NULL
++40 node1_committed_after NULL
+SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
+COUNT(*) = 0
+1
+DROP TABLE t1;
+COMMIT;
-+SET AUTOCOMMIT=ON;
+SET GLOBAL debug_dbug = $debug_orig;
disconnect node_2;
disconnect node_1;
diff --git a/mysql-test/suite/galera/r/galera_sst_mariabackup.result b/mysql-test/suite/galera/r/galera_sst_mariabackup.result
index 4fdc283b286..caf602c017c 100644
--- a/mysql-test/suite/galera/r/galera_sst_mariabackup.result
+++ b/mysql-test/suite/galera/r/galera_sst_mariabackup.result
@@ -4,289 +4,517 @@ connection node_1;
connection node_2;
Performing State Transfer on a server that has been shut down cleanly and restarted
connection node_1;
-CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB;
+CREATE TABLE t1 (id int not null primary key,f1 CHAR(255)) ENGINE=InnoDB;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
+INSERT INTO t1 VALUES (1,'node1_committed_before');
+INSERT INTO t1 VALUES (2,'node1_committed_before');
+INSERT INTO t1 VALUES (3,'node1_committed_before');
+INSERT INTO t1 VALUES (4,'node1_committed_before');
+INSERT INTO t1 VALUES (5,'node1_committed_before');
COMMIT;
connection node_2;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
+INSERT INTO t1 VALUES (6,'node2_committed_before');
+INSERT INTO t1 VALUES (7,'node2_committed_before');
+INSERT INTO t1 VALUES (8,'node2_committed_before');
+INSERT INTO t1 VALUES (9,'node2_committed_before');
+INSERT INTO t1 VALUES (10,'node2_committed_before');
COMMIT;
Shutting down server ...
connection node_1;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
+INSERT INTO t1 VALUES (11,'node1_committed_during');
+INSERT INTO t1 VALUES (12,'node1_committed_during');
+INSERT INTO t1 VALUES (13,'node1_committed_during');
+INSERT INTO t1 VALUES (14,'node1_committed_during');
+INSERT INTO t1 VALUES (15,'node1_committed_during');
COMMIT;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (16,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (17,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (18,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (19,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (20,'node1_to_be_committed_after');
connect node_1a_galera_st_shutdown_slave, 127.0.0.1, root, , test, $NODE_MYPORT_1;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (21,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (22,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (23,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (24,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (25,'node1_to_be_rollbacked_after');
connection node_2;
Starting server ...
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
+INSERT INTO t1 VALUES (26,'node2_committed_after');
+INSERT INTO t1 VALUES (27,'node2_committed_after');
+INSERT INTO t1 VALUES (28,'node2_committed_after');
+INSERT INTO t1 VALUES (29,'node2_committed_after');
+INSERT INTO t1 VALUES (30,'node2_committed_after');
COMMIT;
connection node_1;
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (31,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (32,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (33,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (34,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (35,'node1_to_be_committed_after');
COMMIT;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
+INSERT INTO t1 VALUES (36,'node1_committed_after');
+INSERT INTO t1 VALUES (37,'node1_committed_after');
+INSERT INTO t1 VALUES (38,'node1_committed_after');
+INSERT INTO t1 VALUES (39,'node1_committed_after');
+INSERT INTO t1 VALUES (40,'node1_committed_after');
COMMIT;
connection node_1a_galera_st_shutdown_slave;
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (41,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (42,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (43,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (44,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (45,'node1_to_be_rollbacked_after');
ROLLBACK;
-SELECT COUNT(*) = 35 FROM t1;
-COUNT(*) = 35
-1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_35 FROM t1;
+EXPECT_35
+35
+SELECT * from t1;
+id f1
+1 node1_committed_before
+2 node1_committed_before
+3 node1_committed_before
+4 node1_committed_before
+5 node1_committed_before
+6 node2_committed_before
+7 node2_committed_before
+8 node2_committed_before
+9 node2_committed_before
+10 node2_committed_before
+11 node1_committed_during
+12 node1_committed_during
+13 node1_committed_during
+14 node1_committed_during
+15 node1_committed_during
+16 node1_to_be_committed_after
+17 node1_to_be_committed_after
+18 node1_to_be_committed_after
+19 node1_to_be_committed_after
+20 node1_to_be_committed_after
+26 node2_committed_after
+27 node2_committed_after
+28 node2_committed_after
+29 node2_committed_after
+30 node2_committed_after
+31 node1_to_be_committed_after
+32 node1_to_be_committed_after
+33 node1_to_be_committed_after
+34 node1_to_be_committed_after
+35 node1_to_be_committed_after
+36 node1_committed_after
+37 node1_committed_after
+38 node1_committed_after
+39 node1_committed_after
+40 node1_committed_after
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
COUNT(*) = 0
1
COMMIT;
-SET AUTOCOMMIT=ON;
connection node_1;
-SELECT COUNT(*) = 35 FROM t1;
-COUNT(*) = 35
-1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_35 FROM t1;
+EXPECT_35
+35
+SELECT * from t1;
+id f1
+1 node1_committed_before
+2 node1_committed_before
+3 node1_committed_before
+4 node1_committed_before
+5 node1_committed_before
+6 node2_committed_before
+7 node2_committed_before
+8 node2_committed_before
+9 node2_committed_before
+10 node2_committed_before
+11 node1_committed_during
+12 node1_committed_during
+13 node1_committed_during
+14 node1_committed_during
+15 node1_committed_during
+16 node1_to_be_committed_after
+17 node1_to_be_committed_after
+18 node1_to_be_committed_after
+19 node1_to_be_committed_after
+20 node1_to_be_committed_after
+26 node2_committed_after
+27 node2_committed_after
+28 node2_committed_after
+29 node2_committed_after
+30 node2_committed_after
+31 node1_to_be_committed_after
+32 node1_to_be_committed_after
+33 node1_to_be_committed_after
+34 node1_to_be_committed_after
+35 node1_to_be_committed_after
+36 node1_committed_after
+37 node1_committed_after
+38 node1_committed_after
+39 node1_committed_after
+40 node1_committed_after
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
COUNT(*) = 0
1
DROP TABLE t1;
COMMIT;
-SET AUTOCOMMIT=ON;
Performing State Transfer on a server that starts from a clean var directory
This is accomplished by shutting down node #2 and removing its var directory before restarting it
connection node_1;
-CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB;
+CREATE TABLE t1 (id int not null primary key,f1 CHAR(255)) ENGINE=InnoDB;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
+INSERT INTO t1 VALUES (1,'node1_committed_before');
+INSERT INTO t1 VALUES (2,'node1_committed_before');
+INSERT INTO t1 VALUES (3,'node1_committed_before');
+INSERT INTO t1 VALUES (4,'node1_committed_before');
+INSERT INTO t1 VALUES (5,'node1_committed_before');
COMMIT;
connection node_2;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
+INSERT INTO t1 VALUES (6,'node2_committed_before');
+INSERT INTO t1 VALUES (7,'node2_committed_before');
+INSERT INTO t1 VALUES (8,'node2_committed_before');
+INSERT INTO t1 VALUES (9,'node2_committed_before');
+INSERT INTO t1 VALUES (10,'node2_committed_before');
COMMIT;
Shutting down server ...
connection node_1;
Cleaning var directory ...
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
+INSERT INTO t1 VALUES (11,'node1_committed_during');
+INSERT INTO t1 VALUES (12,'node1_committed_during');
+INSERT INTO t1 VALUES (13,'node1_committed_during');
+INSERT INTO t1 VALUES (14,'node1_committed_during');
+INSERT INTO t1 VALUES (15,'node1_committed_during');
COMMIT;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (16,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (17,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (18,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (19,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (20,'node1_to_be_committed_after');
connect node_1a_galera_st_clean_slave, 127.0.0.1, root, , test, $NODE_MYPORT_1;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (21,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (22,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (23,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (24,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (25,'node1_to_be_rollbacked_after');
connection node_2;
Starting server ...
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
+INSERT INTO t1 VALUES (26,'node2_committed_after');
+INSERT INTO t1 VALUES (27,'node2_committed_after');
+INSERT INTO t1 VALUES (28,'node2_committed_after');
+INSERT INTO t1 VALUES (29,'node2_committed_after');
+INSERT INTO t1 VALUES (30,'node2_committed_after');
COMMIT;
connection node_1;
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (31,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (32,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (33,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (34,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (35,'node1_to_be_committed_after');
COMMIT;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
+INSERT INTO t1 VALUES (36,'node1_committed_after');
+INSERT INTO t1 VALUES (37,'node1_committed_after');
+INSERT INTO t1 VALUES (38,'node1_committed_after');
+INSERT INTO t1 VALUES (39,'node1_committed_after');
+INSERT INTO t1 VALUES (40,'node1_committed_after');
COMMIT;
connection node_1a_galera_st_clean_slave;
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (41,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (42,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (43,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (44,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (45,'node1_to_be_rollbacked_after');
ROLLBACK;
-SELECT COUNT(*) = 35 FROM t1;
-COUNT(*) = 35
-1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_35 FROM t1;
+EXPECT_35
+35
+SELECT * from t1;
+id f1
+1 node1_committed_before
+2 node1_committed_before
+3 node1_committed_before
+4 node1_committed_before
+5 node1_committed_before
+6 node2_committed_before
+7 node2_committed_before
+8 node2_committed_before
+9 node2_committed_before
+10 node2_committed_before
+11 node1_committed_during
+12 node1_committed_during
+13 node1_committed_during
+14 node1_committed_during
+15 node1_committed_during
+16 node1_to_be_committed_after
+17 node1_to_be_committed_after
+18 node1_to_be_committed_after
+19 node1_to_be_committed_after
+20 node1_to_be_committed_after
+26 node2_committed_after
+27 node2_committed_after
+28 node2_committed_after
+29 node2_committed_after
+30 node2_committed_after
+31 node1_to_be_committed_after
+32 node1_to_be_committed_after
+33 node1_to_be_committed_after
+34 node1_to_be_committed_after
+35 node1_to_be_committed_after
+36 node1_committed_after
+37 node1_committed_after
+38 node1_committed_after
+39 node1_committed_after
+40 node1_committed_after
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
COUNT(*) = 0
1
COMMIT;
-SET AUTOCOMMIT=ON;
connection node_1;
-SELECT COUNT(*) = 35 FROM t1;
-COUNT(*) = 35
-1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_35 FROM t1;
+EXPECT_35
+35
+SELECT * from t1;
+id f1
+1 node1_committed_before
+2 node1_committed_before
+3 node1_committed_before
+4 node1_committed_before
+5 node1_committed_before
+6 node2_committed_before
+7 node2_committed_before
+8 node2_committed_before
+9 node2_committed_before
+10 node2_committed_before
+11 node1_committed_during
+12 node1_committed_during
+13 node1_committed_during
+14 node1_committed_during
+15 node1_committed_during
+16 node1_to_be_committed_after
+17 node1_to_be_committed_after
+18 node1_to_be_committed_after
+19 node1_to_be_committed_after
+20 node1_to_be_committed_after
+26 node2_committed_after
+27 node2_committed_after
+28 node2_committed_after
+29 node2_committed_after
+30 node2_committed_after
+31 node1_to_be_committed_after
+32 node1_to_be_committed_after
+33 node1_to_be_committed_after
+34 node1_to_be_committed_after
+35 node1_to_be_committed_after
+36 node1_committed_after
+37 node1_committed_after
+38 node1_committed_after
+39 node1_committed_after
+40 node1_committed_after
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
COUNT(*) = 0
1
DROP TABLE t1;
COMMIT;
-SET AUTOCOMMIT=ON;
Performing State Transfer on a server that has been killed and restarted
connection node_1;
-CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB;
+CREATE TABLE t1 (id int not null primary key,f1 CHAR(255)) ENGINE=InnoDB;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
+INSERT INTO t1 VALUES (1,'node1_committed_before');
+INSERT INTO t1 VALUES (2,'node1_committed_before');
+INSERT INTO t1 VALUES (3,'node1_committed_before');
+INSERT INTO t1 VALUES (4,'node1_committed_before');
+INSERT INTO t1 VALUES (5,'node1_committed_before');
COMMIT;
connection node_2;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
+INSERT INTO t1 VALUES (6,'node2_committed_before');
+INSERT INTO t1 VALUES (7,'node2_committed_before');
+INSERT INTO t1 VALUES (8,'node2_committed_before');
+INSERT INTO t1 VALUES (9,'node2_committed_before');
+INSERT INTO t1 VALUES (10,'node2_committed_before');
COMMIT;
Killing server ...
connection node_1;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
+INSERT INTO t1 VALUES (11,'node1_committed_during');
+INSERT INTO t1 VALUES (12,'node1_committed_during');
+INSERT INTO t1 VALUES (13,'node1_committed_during');
+INSERT INTO t1 VALUES (14,'node1_committed_during');
+INSERT INTO t1 VALUES (15,'node1_committed_during');
COMMIT;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (16,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (17,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (18,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (19,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (20,'node1_to_be_committed_after');
connect node_1a_galera_st_kill_slave, 127.0.0.1, root, , test, $NODE_MYPORT_1;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (21,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (22,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (23,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (24,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (25,'node1_to_be_rollbacked_after');
connection node_2;
Performing --wsrep-recover ...
Starting server ...
Using --wsrep-start-position when starting mysqld ...
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
+INSERT INTO t1 VALUES (26,'node2_committed_after');
+INSERT INTO t1 VALUES (27,'node2_committed_after');
+INSERT INTO t1 VALUES (28,'node2_committed_after');
+INSERT INTO t1 VALUES (29,'node2_committed_after');
+INSERT INTO t1 VALUES (30,'node2_committed_after');
COMMIT;
connection node_1;
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (31,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (32,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (33,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (34,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (35,'node1_to_be_committed_after');
COMMIT;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
+INSERT INTO t1 VALUES (36,'node1_committed_after');
+INSERT INTO t1 VALUES (37,'node1_committed_after');
+INSERT INTO t1 VALUES (38,'node1_committed_after');
+INSERT INTO t1 VALUES (39,'node1_committed_after');
+INSERT INTO t1 VALUES (40,'node1_committed_after');
COMMIT;
connection node_1a_galera_st_kill_slave;
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (41,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (42,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (43,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (44,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (45,'node1_to_be_rollbacked_after');
ROLLBACK;
-SELECT COUNT(*) = 35 FROM t1;
-COUNT(*) = 35
-1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_35 FROM t1;
+EXPECT_35
+35
+SELECT * FROM t1;
+id f1
+1 node1_committed_before
+2 node1_committed_before
+3 node1_committed_before
+4 node1_committed_before
+5 node1_committed_before
+6 node2_committed_before
+7 node2_committed_before
+8 node2_committed_before
+9 node2_committed_before
+10 node2_committed_before
+11 node1_committed_during
+12 node1_committed_during
+13 node1_committed_during
+14 node1_committed_during
+15 node1_committed_during
+16 node1_to_be_committed_after
+17 node1_to_be_committed_after
+18 node1_to_be_committed_after
+19 node1_to_be_committed_after
+20 node1_to_be_committed_after
+26 node2_committed_after
+27 node2_committed_after
+28 node2_committed_after
+29 node2_committed_after
+30 node2_committed_after
+31 node1_to_be_committed_after
+32 node1_to_be_committed_after
+33 node1_to_be_committed_after
+34 node1_to_be_committed_after
+35 node1_to_be_committed_after
+36 node1_committed_after
+37 node1_committed_after
+38 node1_committed_after
+39 node1_committed_after
+40 node1_committed_after
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
COUNT(*) = 0
1
COMMIT;
-SET AUTOCOMMIT=ON;
connection node_1;
-SELECT COUNT(*) = 35 FROM t1;
-COUNT(*) = 35
-1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_35 FROM t1;
+EXPECT_35
+35
+SELECT * FROM t1;
+id f1
+1 node1_committed_before
+2 node1_committed_before
+3 node1_committed_before
+4 node1_committed_before
+5 node1_committed_before
+6 node2_committed_before
+7 node2_committed_before
+8 node2_committed_before
+9 node2_committed_before
+10 node2_committed_before
+11 node1_committed_during
+12 node1_committed_during
+13 node1_committed_during
+14 node1_committed_during
+15 node1_committed_during
+16 node1_to_be_committed_after
+17 node1_to_be_committed_after
+18 node1_to_be_committed_after
+19 node1_to_be_committed_after
+20 node1_to_be_committed_after
+26 node2_committed_after
+27 node2_committed_after
+28 node2_committed_after
+29 node2_committed_after
+30 node2_committed_after
+31 node1_to_be_committed_after
+32 node1_to_be_committed_after
+33 node1_to_be_committed_after
+34 node1_to_be_committed_after
+35 node1_to_be_committed_after
+36 node1_committed_after
+37 node1_committed_after
+38 node1_committed_after
+39 node1_committed_after
+40 node1_committed_after
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
COUNT(*) = 0
1
DROP TABLE t1;
COMMIT;
-SET AUTOCOMMIT=ON;
disconnect node_2;
disconnect node_1;
diff --git a/mysql-test/suite/galera/r/galera_sst_mariabackup_data_dir,debug.rdiff b/mysql-test/suite/galera/r/galera_sst_mariabackup_data_dir,debug.rdiff
index f06da63561a..870b12de3c9 100644
--- a/mysql-test/suite/galera/r/galera_sst_mariabackup_data_dir,debug.rdiff
+++ b/mysql-test/suite/galera/r/galera_sst_mariabackup_data_dir,debug.rdiff
@@ -1,27 +1,27 @@
---- r/galera_sst_mariabackup_data_dir.result 2018-12-12 13:59:56.525554689 +0100
-+++ r/galera_sst_mariabackup_data_dir.reject 2018-12-12 14:33:50.868181956 +0100
-@@ -286,5 +286,113 @@
+--- r/galera_sst_mariabackup_data_dir.result 2021-04-10 14:26:02.798965488 +0300
++++ r/galera_sst_mariabackup_data_dir,debug.reject 2021-04-10 14:54:44.825538224 +0300
+@@ -516,5 +516,189 @@
+ 1
DROP TABLE t1;
COMMIT;
- SET AUTOCOMMIT=ON;
+Performing State Transfer on a server that has been killed and restarted
+while a DDL was in progress on it
+connection node_1;
-+CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB;
++CREATE TABLE t1 (id int not null primary key,f1 CHAR(255)) ENGINE=InnoDB;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
-+INSERT INTO t1 VALUES ('node1_committed_before');
-+INSERT INTO t1 VALUES ('node1_committed_before');
-+INSERT INTO t1 VALUES ('node1_committed_before');
-+INSERT INTO t1 VALUES ('node1_committed_before');
-+INSERT INTO t1 VALUES ('node1_committed_before');
++INSERT INTO t1 VALUES (1,'node1_committed_before');
++INSERT INTO t1 VALUES (2,'node1_committed_before');
++INSERT INTO t1 VALUES (3,'node1_committed_before');
++INSERT INTO t1 VALUES (4,'node1_committed_before');
++INSERT INTO t1 VALUES (5,'node1_committed_before');
+connection node_2;
+START TRANSACTION;
-+INSERT INTO t1 VALUES ('node2_committed_before');
-+INSERT INTO t1 VALUES ('node2_committed_before');
-+INSERT INTO t1 VALUES ('node2_committed_before');
-+INSERT INTO t1 VALUES ('node2_committed_before');
-+INSERT INTO t1 VALUES ('node2_committed_before');
++INSERT INTO t1 VALUES (6,'node2_committed_before');
++INSERT INTO t1 VALUES (7,'node2_committed_before');
++INSERT INTO t1 VALUES (8,'node2_committed_before');
++INSERT INTO t1 VALUES (9,'node2_committed_before');
++INSERT INTO t1 VALUES (10,'node2_committed_before');
+COMMIT;
+SET GLOBAL debug_dbug = 'd,sync.alter_opened_table';
+connection node_1;
@@ -32,26 +32,26 @@
+connection node_1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
-+INSERT INTO t1 (f1) VALUES ('node1_committed_during');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_during');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_during');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_during');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_during');
++INSERT INTO t1 (id,f1) VALUES (11,'node1_committed_during');
++INSERT INTO t1 (id,f1) VALUES (12,'node1_committed_during');
++INSERT INTO t1 (id,f1) VALUES (13,'node1_committed_during');
++INSERT INTO t1 (id,f1) VALUES (14,'node1_committed_during');
++INSERT INTO t1 (id,f1) VALUES (15,'node1_committed_during');
+COMMIT;
+START TRANSACTION;
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (16,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (17,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (18,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (19,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (20,'node1_to_be_committed_after');
+connect node_1a_galera_st_kill_slave_ddl, 127.0.0.1, root, , test, $NODE_MYPORT_1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (21,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (22,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (23,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (24,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (25,'node1_to_be_rollbacked_after');
+connection node_2;
+Performing --wsrep-recover ...
+connection node_2;
@@ -59,58 +59,134 @@
+Using --wsrep-start-position when starting mysqld ...
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
-+INSERT INTO t1 (f1) VALUES ('node2_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node2_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node2_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node2_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node2_committed_after');
++INSERT INTO t1 (id,f1) VALUES (26,'node2_committed_after');
++INSERT INTO t1 (id,f1) VALUES (27,'node2_committed_after');
++INSERT INTO t1 (id,f1) VALUES (28,'node2_committed_after');
++INSERT INTO t1 (id,f1) VALUES (29,'node2_committed_after');
++INSERT INTO t1 (id,f1) VALUES (30,'node2_committed_after');
+COMMIT;
+connection node_1;
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (31,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (32,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (33,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (34,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (35,'node1_to_be_committed_after');
+COMMIT;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
-+INSERT INTO t1 (f1) VALUES ('node1_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_after');
++INSERT INTO t1 (id,f1) VALUES (36,'node1_committed_after');
++INSERT INTO t1 (id,f1) VALUES (37,'node1_committed_after');
++INSERT INTO t1 (id,f1) VALUES (38,'node1_committed_after');
++INSERT INTO t1 (id,f1) VALUES (39,'node1_committed_after');
++INSERT INTO t1 (id,f1) VALUES (40,'node1_committed_after');
+COMMIT;
+connection node_1a_galera_st_kill_slave_ddl;
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (41,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (42,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (43,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (44,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (45,'node1_to_be_rollbacked_after');
+ROLLBACK;
-+SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1';
-+COUNT(*) = 2
-+1
-+SELECT COUNT(*) = 35 FROM t1;
-+COUNT(*) = 35
-+1
++SET AUTOCOMMIT=ON;
++SET SESSION wsrep_sync_wait=15;
++SELECT COUNT(*) AS EXPECT_3 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1';
++EXPECT_3
++3
++SELECT COUNT(*) AS EXPECT_35 FROM t1;
++EXPECT_35
++35
++SELECT * FROM t1;
++id f1 f2
++1 node1_committed_before NULL
++2 node1_committed_before NULL
++3 node1_committed_before NULL
++4 node1_committed_before NULL
++5 node1_committed_before NULL
++6 node2_committed_before NULL
++7 node2_committed_before NULL
++8 node2_committed_before NULL
++9 node2_committed_before NULL
++10 node2_committed_before NULL
++11 node1_committed_during NULL
++12 node1_committed_during NULL
++13 node1_committed_during NULL
++14 node1_committed_during NULL
++15 node1_committed_during NULL
++16 node1_to_be_committed_after NULL
++17 node1_to_be_committed_after NULL
++18 node1_to_be_committed_after NULL
++19 node1_to_be_committed_after NULL
++20 node1_to_be_committed_after NULL
++26 node2_committed_after NULL
++27 node2_committed_after NULL
++28 node2_committed_after NULL
++29 node2_committed_after NULL
++30 node2_committed_after NULL
++31 node1_to_be_committed_after NULL
++32 node1_to_be_committed_after NULL
++33 node1_to_be_committed_after NULL
++34 node1_to_be_committed_after NULL
++35 node1_to_be_committed_after NULL
++36 node1_committed_after NULL
++37 node1_committed_after NULL
++38 node1_committed_after NULL
++39 node1_committed_after NULL
++40 node1_committed_after NULL
+SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
+COUNT(*) = 0
+1
+COMMIT;
-+SET AUTOCOMMIT=ON;
+connection node_1;
-+SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1';
-+COUNT(*) = 2
-+1
-+SELECT COUNT(*) = 35 FROM t1;
-+COUNT(*) = 35
-+1
++SET AUTOCOMMIT=ON;
++SET SESSION wsrep_sync_wait=15;
++SELECT COUNT(*) AS EXPECT_3 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1';
++EXPECT_3
++3
++SELECT COUNT(*) AS EXPECT_35 FROM t1;
++EXPECT_35
++35
++SELECT * FROM t1;
++id f1 f2
++1 node1_committed_before NULL
++2 node1_committed_before NULL
++3 node1_committed_before NULL
++4 node1_committed_before NULL
++5 node1_committed_before NULL
++6 node2_committed_before NULL
++7 node2_committed_before NULL
++8 node2_committed_before NULL
++9 node2_committed_before NULL
++10 node2_committed_before NULL
++11 node1_committed_during NULL
++12 node1_committed_during NULL
++13 node1_committed_during NULL
++14 node1_committed_during NULL
++15 node1_committed_during NULL
++16 node1_to_be_committed_after NULL
++17 node1_to_be_committed_after NULL
++18 node1_to_be_committed_after NULL
++19 node1_to_be_committed_after NULL
++20 node1_to_be_committed_after NULL
++26 node2_committed_after NULL
++27 node2_committed_after NULL
++28 node2_committed_after NULL
++29 node2_committed_after NULL
++30 node2_committed_after NULL
++31 node1_to_be_committed_after NULL
++32 node1_to_be_committed_after NULL
++33 node1_to_be_committed_after NULL
++34 node1_to_be_committed_after NULL
++35 node1_to_be_committed_after NULL
++36 node1_committed_after NULL
++37 node1_committed_after NULL
++38 node1_committed_after NULL
++39 node1_committed_after NULL
++40 node1_committed_after NULL
+SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
+COUNT(*) = 0
+1
+DROP TABLE t1;
+COMMIT;
-+SET AUTOCOMMIT=ON;
+SET GLOBAL debug_dbug = $debug_orig;
disconnect node_2;
disconnect node_1;
diff --git a/mysql-test/suite/galera/r/galera_sst_mariabackup_data_dir.result b/mysql-test/suite/galera/r/galera_sst_mariabackup_data_dir.result
index 4fdc283b286..caf602c017c 100644
--- a/mysql-test/suite/galera/r/galera_sst_mariabackup_data_dir.result
+++ b/mysql-test/suite/galera/r/galera_sst_mariabackup_data_dir.result
@@ -4,289 +4,517 @@ connection node_1;
connection node_2;
Performing State Transfer on a server that has been shut down cleanly and restarted
connection node_1;
-CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB;
+CREATE TABLE t1 (id int not null primary key,f1 CHAR(255)) ENGINE=InnoDB;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
+INSERT INTO t1 VALUES (1,'node1_committed_before');
+INSERT INTO t1 VALUES (2,'node1_committed_before');
+INSERT INTO t1 VALUES (3,'node1_committed_before');
+INSERT INTO t1 VALUES (4,'node1_committed_before');
+INSERT INTO t1 VALUES (5,'node1_committed_before');
COMMIT;
connection node_2;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
+INSERT INTO t1 VALUES (6,'node2_committed_before');
+INSERT INTO t1 VALUES (7,'node2_committed_before');
+INSERT INTO t1 VALUES (8,'node2_committed_before');
+INSERT INTO t1 VALUES (9,'node2_committed_before');
+INSERT INTO t1 VALUES (10,'node2_committed_before');
COMMIT;
Shutting down server ...
connection node_1;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
+INSERT INTO t1 VALUES (11,'node1_committed_during');
+INSERT INTO t1 VALUES (12,'node1_committed_during');
+INSERT INTO t1 VALUES (13,'node1_committed_during');
+INSERT INTO t1 VALUES (14,'node1_committed_during');
+INSERT INTO t1 VALUES (15,'node1_committed_during');
COMMIT;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (16,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (17,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (18,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (19,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (20,'node1_to_be_committed_after');
connect node_1a_galera_st_shutdown_slave, 127.0.0.1, root, , test, $NODE_MYPORT_1;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (21,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (22,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (23,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (24,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (25,'node1_to_be_rollbacked_after');
connection node_2;
Starting server ...
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
+INSERT INTO t1 VALUES (26,'node2_committed_after');
+INSERT INTO t1 VALUES (27,'node2_committed_after');
+INSERT INTO t1 VALUES (28,'node2_committed_after');
+INSERT INTO t1 VALUES (29,'node2_committed_after');
+INSERT INTO t1 VALUES (30,'node2_committed_after');
COMMIT;
connection node_1;
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (31,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (32,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (33,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (34,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (35,'node1_to_be_committed_after');
COMMIT;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
+INSERT INTO t1 VALUES (36,'node1_committed_after');
+INSERT INTO t1 VALUES (37,'node1_committed_after');
+INSERT INTO t1 VALUES (38,'node1_committed_after');
+INSERT INTO t1 VALUES (39,'node1_committed_after');
+INSERT INTO t1 VALUES (40,'node1_committed_after');
COMMIT;
connection node_1a_galera_st_shutdown_slave;
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (41,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (42,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (43,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (44,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (45,'node1_to_be_rollbacked_after');
ROLLBACK;
-SELECT COUNT(*) = 35 FROM t1;
-COUNT(*) = 35
-1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_35 FROM t1;
+EXPECT_35
+35
+SELECT * from t1;
+id f1
+1 node1_committed_before
+2 node1_committed_before
+3 node1_committed_before
+4 node1_committed_before
+5 node1_committed_before
+6 node2_committed_before
+7 node2_committed_before
+8 node2_committed_before
+9 node2_committed_before
+10 node2_committed_before
+11 node1_committed_during
+12 node1_committed_during
+13 node1_committed_during
+14 node1_committed_during
+15 node1_committed_during
+16 node1_to_be_committed_after
+17 node1_to_be_committed_after
+18 node1_to_be_committed_after
+19 node1_to_be_committed_after
+20 node1_to_be_committed_after
+26 node2_committed_after
+27 node2_committed_after
+28 node2_committed_after
+29 node2_committed_after
+30 node2_committed_after
+31 node1_to_be_committed_after
+32 node1_to_be_committed_after
+33 node1_to_be_committed_after
+34 node1_to_be_committed_after
+35 node1_to_be_committed_after
+36 node1_committed_after
+37 node1_committed_after
+38 node1_committed_after
+39 node1_committed_after
+40 node1_committed_after
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
COUNT(*) = 0
1
COMMIT;
-SET AUTOCOMMIT=ON;
connection node_1;
-SELECT COUNT(*) = 35 FROM t1;
-COUNT(*) = 35
-1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_35 FROM t1;
+EXPECT_35
+35
+SELECT * from t1;
+id f1
+1 node1_committed_before
+2 node1_committed_before
+3 node1_committed_before
+4 node1_committed_before
+5 node1_committed_before
+6 node2_committed_before
+7 node2_committed_before
+8 node2_committed_before
+9 node2_committed_before
+10 node2_committed_before
+11 node1_committed_during
+12 node1_committed_during
+13 node1_committed_during
+14 node1_committed_during
+15 node1_committed_during
+16 node1_to_be_committed_after
+17 node1_to_be_committed_after
+18 node1_to_be_committed_after
+19 node1_to_be_committed_after
+20 node1_to_be_committed_after
+26 node2_committed_after
+27 node2_committed_after
+28 node2_committed_after
+29 node2_committed_after
+30 node2_committed_after
+31 node1_to_be_committed_after
+32 node1_to_be_committed_after
+33 node1_to_be_committed_after
+34 node1_to_be_committed_after
+35 node1_to_be_committed_after
+36 node1_committed_after
+37 node1_committed_after
+38 node1_committed_after
+39 node1_committed_after
+40 node1_committed_after
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
COUNT(*) = 0
1
DROP TABLE t1;
COMMIT;
-SET AUTOCOMMIT=ON;
Performing State Transfer on a server that starts from a clean var directory
This is accomplished by shutting down node #2 and removing its var directory before restarting it
connection node_1;
-CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB;
+CREATE TABLE t1 (id int not null primary key,f1 CHAR(255)) ENGINE=InnoDB;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
+INSERT INTO t1 VALUES (1,'node1_committed_before');
+INSERT INTO t1 VALUES (2,'node1_committed_before');
+INSERT INTO t1 VALUES (3,'node1_committed_before');
+INSERT INTO t1 VALUES (4,'node1_committed_before');
+INSERT INTO t1 VALUES (5,'node1_committed_before');
COMMIT;
connection node_2;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
+INSERT INTO t1 VALUES (6,'node2_committed_before');
+INSERT INTO t1 VALUES (7,'node2_committed_before');
+INSERT INTO t1 VALUES (8,'node2_committed_before');
+INSERT INTO t1 VALUES (9,'node2_committed_before');
+INSERT INTO t1 VALUES (10,'node2_committed_before');
COMMIT;
Shutting down server ...
connection node_1;
Cleaning var directory ...
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
+INSERT INTO t1 VALUES (11,'node1_committed_during');
+INSERT INTO t1 VALUES (12,'node1_committed_during');
+INSERT INTO t1 VALUES (13,'node1_committed_during');
+INSERT INTO t1 VALUES (14,'node1_committed_during');
+INSERT INTO t1 VALUES (15,'node1_committed_during');
COMMIT;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (16,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (17,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (18,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (19,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (20,'node1_to_be_committed_after');
connect node_1a_galera_st_clean_slave, 127.0.0.1, root, , test, $NODE_MYPORT_1;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (21,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (22,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (23,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (24,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (25,'node1_to_be_rollbacked_after');
connection node_2;
Starting server ...
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
+INSERT INTO t1 VALUES (26,'node2_committed_after');
+INSERT INTO t1 VALUES (27,'node2_committed_after');
+INSERT INTO t1 VALUES (28,'node2_committed_after');
+INSERT INTO t1 VALUES (29,'node2_committed_after');
+INSERT INTO t1 VALUES (30,'node2_committed_after');
COMMIT;
connection node_1;
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (31,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (32,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (33,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (34,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (35,'node1_to_be_committed_after');
COMMIT;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
+INSERT INTO t1 VALUES (36,'node1_committed_after');
+INSERT INTO t1 VALUES (37,'node1_committed_after');
+INSERT INTO t1 VALUES (38,'node1_committed_after');
+INSERT INTO t1 VALUES (39,'node1_committed_after');
+INSERT INTO t1 VALUES (40,'node1_committed_after');
COMMIT;
connection node_1a_galera_st_clean_slave;
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (41,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (42,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (43,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (44,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (45,'node1_to_be_rollbacked_after');
ROLLBACK;
-SELECT COUNT(*) = 35 FROM t1;
-COUNT(*) = 35
-1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_35 FROM t1;
+EXPECT_35
+35
+SELECT * from t1;
+id f1
+1 node1_committed_before
+2 node1_committed_before
+3 node1_committed_before
+4 node1_committed_before
+5 node1_committed_before
+6 node2_committed_before
+7 node2_committed_before
+8 node2_committed_before
+9 node2_committed_before
+10 node2_committed_before
+11 node1_committed_during
+12 node1_committed_during
+13 node1_committed_during
+14 node1_committed_during
+15 node1_committed_during
+16 node1_to_be_committed_after
+17 node1_to_be_committed_after
+18 node1_to_be_committed_after
+19 node1_to_be_committed_after
+20 node1_to_be_committed_after
+26 node2_committed_after
+27 node2_committed_after
+28 node2_committed_after
+29 node2_committed_after
+30 node2_committed_after
+31 node1_to_be_committed_after
+32 node1_to_be_committed_after
+33 node1_to_be_committed_after
+34 node1_to_be_committed_after
+35 node1_to_be_committed_after
+36 node1_committed_after
+37 node1_committed_after
+38 node1_committed_after
+39 node1_committed_after
+40 node1_committed_after
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
COUNT(*) = 0
1
COMMIT;
-SET AUTOCOMMIT=ON;
connection node_1;
-SELECT COUNT(*) = 35 FROM t1;
-COUNT(*) = 35
-1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_35 FROM t1;
+EXPECT_35
+35
+SELECT * from t1;
+id f1
+1 node1_committed_before
+2 node1_committed_before
+3 node1_committed_before
+4 node1_committed_before
+5 node1_committed_before
+6 node2_committed_before
+7 node2_committed_before
+8 node2_committed_before
+9 node2_committed_before
+10 node2_committed_before
+11 node1_committed_during
+12 node1_committed_during
+13 node1_committed_during
+14 node1_committed_during
+15 node1_committed_during
+16 node1_to_be_committed_after
+17 node1_to_be_committed_after
+18 node1_to_be_committed_after
+19 node1_to_be_committed_after
+20 node1_to_be_committed_after
+26 node2_committed_after
+27 node2_committed_after
+28 node2_committed_after
+29 node2_committed_after
+30 node2_committed_after
+31 node1_to_be_committed_after
+32 node1_to_be_committed_after
+33 node1_to_be_committed_after
+34 node1_to_be_committed_after
+35 node1_to_be_committed_after
+36 node1_committed_after
+37 node1_committed_after
+38 node1_committed_after
+39 node1_committed_after
+40 node1_committed_after
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
COUNT(*) = 0
1
DROP TABLE t1;
COMMIT;
-SET AUTOCOMMIT=ON;
Performing State Transfer on a server that has been killed and restarted
connection node_1;
-CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB;
+CREATE TABLE t1 (id int not null primary key,f1 CHAR(255)) ENGINE=InnoDB;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
+INSERT INTO t1 VALUES (1,'node1_committed_before');
+INSERT INTO t1 VALUES (2,'node1_committed_before');
+INSERT INTO t1 VALUES (3,'node1_committed_before');
+INSERT INTO t1 VALUES (4,'node1_committed_before');
+INSERT INTO t1 VALUES (5,'node1_committed_before');
COMMIT;
connection node_2;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
+INSERT INTO t1 VALUES (6,'node2_committed_before');
+INSERT INTO t1 VALUES (7,'node2_committed_before');
+INSERT INTO t1 VALUES (8,'node2_committed_before');
+INSERT INTO t1 VALUES (9,'node2_committed_before');
+INSERT INTO t1 VALUES (10,'node2_committed_before');
COMMIT;
Killing server ...
connection node_1;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
+INSERT INTO t1 VALUES (11,'node1_committed_during');
+INSERT INTO t1 VALUES (12,'node1_committed_during');
+INSERT INTO t1 VALUES (13,'node1_committed_during');
+INSERT INTO t1 VALUES (14,'node1_committed_during');
+INSERT INTO t1 VALUES (15,'node1_committed_during');
COMMIT;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (16,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (17,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (18,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (19,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (20,'node1_to_be_committed_after');
connect node_1a_galera_st_kill_slave, 127.0.0.1, root, , test, $NODE_MYPORT_1;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (21,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (22,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (23,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (24,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (25,'node1_to_be_rollbacked_after');
connection node_2;
Performing --wsrep-recover ...
Starting server ...
Using --wsrep-start-position when starting mysqld ...
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
+INSERT INTO t1 VALUES (26,'node2_committed_after');
+INSERT INTO t1 VALUES (27,'node2_committed_after');
+INSERT INTO t1 VALUES (28,'node2_committed_after');
+INSERT INTO t1 VALUES (29,'node2_committed_after');
+INSERT INTO t1 VALUES (30,'node2_committed_after');
COMMIT;
connection node_1;
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (31,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (32,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (33,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (34,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (35,'node1_to_be_committed_after');
COMMIT;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
+INSERT INTO t1 VALUES (36,'node1_committed_after');
+INSERT INTO t1 VALUES (37,'node1_committed_after');
+INSERT INTO t1 VALUES (38,'node1_committed_after');
+INSERT INTO t1 VALUES (39,'node1_committed_after');
+INSERT INTO t1 VALUES (40,'node1_committed_after');
COMMIT;
connection node_1a_galera_st_kill_slave;
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (41,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (42,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (43,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (44,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (45,'node1_to_be_rollbacked_after');
ROLLBACK;
-SELECT COUNT(*) = 35 FROM t1;
-COUNT(*) = 35
-1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_35 FROM t1;
+EXPECT_35
+35
+SELECT * FROM t1;
+id f1
+1 node1_committed_before
+2 node1_committed_before
+3 node1_committed_before
+4 node1_committed_before
+5 node1_committed_before
+6 node2_committed_before
+7 node2_committed_before
+8 node2_committed_before
+9 node2_committed_before
+10 node2_committed_before
+11 node1_committed_during
+12 node1_committed_during
+13 node1_committed_during
+14 node1_committed_during
+15 node1_committed_during
+16 node1_to_be_committed_after
+17 node1_to_be_committed_after
+18 node1_to_be_committed_after
+19 node1_to_be_committed_after
+20 node1_to_be_committed_after
+26 node2_committed_after
+27 node2_committed_after
+28 node2_committed_after
+29 node2_committed_after
+30 node2_committed_after
+31 node1_to_be_committed_after
+32 node1_to_be_committed_after
+33 node1_to_be_committed_after
+34 node1_to_be_committed_after
+35 node1_to_be_committed_after
+36 node1_committed_after
+37 node1_committed_after
+38 node1_committed_after
+39 node1_committed_after
+40 node1_committed_after
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
COUNT(*) = 0
1
COMMIT;
-SET AUTOCOMMIT=ON;
connection node_1;
-SELECT COUNT(*) = 35 FROM t1;
-COUNT(*) = 35
-1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_35 FROM t1;
+EXPECT_35
+35
+SELECT * FROM t1;
+id f1
+1 node1_committed_before
+2 node1_committed_before
+3 node1_committed_before
+4 node1_committed_before
+5 node1_committed_before
+6 node2_committed_before
+7 node2_committed_before
+8 node2_committed_before
+9 node2_committed_before
+10 node2_committed_before
+11 node1_committed_during
+12 node1_committed_during
+13 node1_committed_during
+14 node1_committed_during
+15 node1_committed_during
+16 node1_to_be_committed_after
+17 node1_to_be_committed_after
+18 node1_to_be_committed_after
+19 node1_to_be_committed_after
+20 node1_to_be_committed_after
+26 node2_committed_after
+27 node2_committed_after
+28 node2_committed_after
+29 node2_committed_after
+30 node2_committed_after
+31 node1_to_be_committed_after
+32 node1_to_be_committed_after
+33 node1_to_be_committed_after
+34 node1_to_be_committed_after
+35 node1_to_be_committed_after
+36 node1_committed_after
+37 node1_committed_after
+38 node1_committed_after
+39 node1_committed_after
+40 node1_committed_after
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
COUNT(*) = 0
1
DROP TABLE t1;
COMMIT;
-SET AUTOCOMMIT=ON;
disconnect node_2;
disconnect node_1;
diff --git a/mysql-test/suite/galera/r/galera_sst_mysqldump,debug.rdiff b/mysql-test/suite/galera/r/galera_sst_mysqldump,debug.rdiff
index 2978411c8f7..e73a27ad0ac 100644
--- a/mysql-test/suite/galera/r/galera_sst_mysqldump,debug.rdiff
+++ b/mysql-test/suite/galera/r/galera_sst_mysqldump,debug.rdiff
@@ -1,43 +1,27 @@
---- galera_sst_mysqldump.result 2018-11-29 23:54:03.663607613 +0100
-+++ galera_sst_mysqldump,debug.reject 2018-11-29 23:55:42.377562815 +0100
-@@ -1,3 +1,5 @@
-+connection node_2;
-+connection node_1;
- Setting SST method to mysqldump ...
- call mtr.add_suppression("WSREP: wsrep_sst_method is set to 'mysqldump' yet mysqld bind_address is set to '127.0.0.1'");
- call mtr.add_suppression("Failed to load slave replication state from table mysql.gtid_slave_pos");
-@@ -56,6 +58,9 @@
- INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
- connection node_2;
- Loading wsrep provider ...
-+disconnect node_2;
-+connect node_2, 127.0.0.1, root, , test, $NODE_MYPORT_2;
-+connection node_2;
- SET AUTOCOMMIT=OFF;
- START TRANSACTION;
- INSERT INTO t1 VALUES ('node2_committed_after');
-@@ -390,6 +395,114 @@
+--- r/galera_sst_mysqldump.result 2021-04-18 13:15:29.909314729 +0300
++++ r/galera_sst_mysqldump.reject 2021-04-18 13:50:47.096965646 +0300
+@@ -698,6 +698,190 @@
+ 1
DROP TABLE t1;
COMMIT;
- SET AUTOCOMMIT=ON;
+Performing State Transfer on a server that has been killed and restarted
+while a DDL was in progress on it
+connection node_1;
-+CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB;
++CREATE TABLE t1 (id int not null primary key,f1 CHAR(255)) ENGINE=InnoDB;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
-+INSERT INTO t1 VALUES ('node1_committed_before');
-+INSERT INTO t1 VALUES ('node1_committed_before');
-+INSERT INTO t1 VALUES ('node1_committed_before');
-+INSERT INTO t1 VALUES ('node1_committed_before');
-+INSERT INTO t1 VALUES ('node1_committed_before');
++INSERT INTO t1 VALUES (1,'node1_committed_before');
++INSERT INTO t1 VALUES (2,'node1_committed_before');
++INSERT INTO t1 VALUES (3,'node1_committed_before');
++INSERT INTO t1 VALUES (4,'node1_committed_before');
++INSERT INTO t1 VALUES (5,'node1_committed_before');
+connection node_2;
+START TRANSACTION;
-+INSERT INTO t1 VALUES ('node2_committed_before');
-+INSERT INTO t1 VALUES ('node2_committed_before');
-+INSERT INTO t1 VALUES ('node2_committed_before');
-+INSERT INTO t1 VALUES ('node2_committed_before');
-+INSERT INTO t1 VALUES ('node2_committed_before');
++INSERT INTO t1 VALUES (6,'node2_committed_before');
++INSERT INTO t1 VALUES (7,'node2_committed_before');
++INSERT INTO t1 VALUES (8,'node2_committed_before');
++INSERT INTO t1 VALUES (9,'node2_committed_before');
++INSERT INTO t1 VALUES (10,'node2_committed_before');
+COMMIT;
+SET GLOBAL debug_dbug = 'd,sync.alter_opened_table';
+connection node_1;
@@ -48,26 +32,26 @@
+connection node_1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
-+INSERT INTO t1 (f1) VALUES ('node1_committed_during');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_during');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_during');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_during');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_during');
++INSERT INTO t1 (id,f1) VALUES (11,'node1_committed_during');
++INSERT INTO t1 (id,f1) VALUES (12,'node1_committed_during');
++INSERT INTO t1 (id,f1) VALUES (13,'node1_committed_during');
++INSERT INTO t1 (id,f1) VALUES (14,'node1_committed_during');
++INSERT INTO t1 (id,f1) VALUES (15,'node1_committed_during');
+COMMIT;
+START TRANSACTION;
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (16,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (17,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (18,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (19,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (20,'node1_to_be_committed_after');
+connect node_1a_galera_st_kill_slave_ddl, 127.0.0.1, root, , test, $NODE_MYPORT_1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (21,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (22,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (23,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (24,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (25,'node1_to_be_rollbacked_after');
+connection node_2;
+Performing --wsrep-recover ...
+connection node_2;
@@ -75,58 +59,134 @@
+Using --wsrep-start-position when starting mysqld ...
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
-+INSERT INTO t1 (f1) VALUES ('node2_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node2_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node2_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node2_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node2_committed_after');
++INSERT INTO t1 (id,f1) VALUES (26,'node2_committed_after');
++INSERT INTO t1 (id,f1) VALUES (27,'node2_committed_after');
++INSERT INTO t1 (id,f1) VALUES (28,'node2_committed_after');
++INSERT INTO t1 (id,f1) VALUES (29,'node2_committed_after');
++INSERT INTO t1 (id,f1) VALUES (30,'node2_committed_after');
+COMMIT;
+connection node_1;
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (31,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (32,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (33,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (34,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (35,'node1_to_be_committed_after');
+COMMIT;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
-+INSERT INTO t1 (f1) VALUES ('node1_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_after');
++INSERT INTO t1 (id,f1) VALUES (36,'node1_committed_after');
++INSERT INTO t1 (id,f1) VALUES (37,'node1_committed_after');
++INSERT INTO t1 (id,f1) VALUES (38,'node1_committed_after');
++INSERT INTO t1 (id,f1) VALUES (39,'node1_committed_after');
++INSERT INTO t1 (id,f1) VALUES (40,'node1_committed_after');
+COMMIT;
+connection node_1a_galera_st_kill_slave_ddl;
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (41,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (42,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (43,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (44,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (45,'node1_to_be_rollbacked_after');
+ROLLBACK;
-+SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1';
-+COUNT(*) = 2
-+1
-+SELECT COUNT(*) = 35 FROM t1;
-+COUNT(*) = 35
-+1
++SET AUTOCOMMIT=ON;
++SET SESSION wsrep_sync_wait=15;
++SELECT COUNT(*) AS EXPECT_3 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1';
++EXPECT_3
++3
++SELECT COUNT(*) AS EXPECT_35 FROM t1;
++EXPECT_35
++35
++SELECT * FROM t1;
++id f1 f2
++1 node1_committed_before NULL
++2 node1_committed_before NULL
++3 node1_committed_before NULL
++4 node1_committed_before NULL
++5 node1_committed_before NULL
++6 node2_committed_before NULL
++7 node2_committed_before NULL
++8 node2_committed_before NULL
++9 node2_committed_before NULL
++10 node2_committed_before NULL
++11 node1_committed_during NULL
++12 node1_committed_during NULL
++13 node1_committed_during NULL
++14 node1_committed_during NULL
++15 node1_committed_during NULL
++16 node1_to_be_committed_after NULL
++17 node1_to_be_committed_after NULL
++18 node1_to_be_committed_after NULL
++19 node1_to_be_committed_after NULL
++20 node1_to_be_committed_after NULL
++26 node2_committed_after NULL
++27 node2_committed_after NULL
++28 node2_committed_after NULL
++29 node2_committed_after NULL
++30 node2_committed_after NULL
++31 node1_to_be_committed_after NULL
++32 node1_to_be_committed_after NULL
++33 node1_to_be_committed_after NULL
++34 node1_to_be_committed_after NULL
++35 node1_to_be_committed_after NULL
++36 node1_committed_after NULL
++37 node1_committed_after NULL
++38 node1_committed_after NULL
++39 node1_committed_after NULL
++40 node1_committed_after NULL
+SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
+COUNT(*) = 0
+1
+COMMIT;
-+SET AUTOCOMMIT=ON;
+connection node_1;
-+SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1';
-+COUNT(*) = 2
-+1
-+SELECT COUNT(*) = 35 FROM t1;
-+COUNT(*) = 35
-+1
++SET AUTOCOMMIT=ON;
++SET SESSION wsrep_sync_wait=15;
++SELECT COUNT(*) AS EXPECT_3 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1';
++EXPECT_3
++3
++SELECT COUNT(*) AS EXPECT_35 FROM t1;
++EXPECT_35
++35
++SELECT * FROM t1;
++id f1 f2
++1 node1_committed_before NULL
++2 node1_committed_before NULL
++3 node1_committed_before NULL
++4 node1_committed_before NULL
++5 node1_committed_before NULL
++6 node2_committed_before NULL
++7 node2_committed_before NULL
++8 node2_committed_before NULL
++9 node2_committed_before NULL
++10 node2_committed_before NULL
++11 node1_committed_during NULL
++12 node1_committed_during NULL
++13 node1_committed_during NULL
++14 node1_committed_during NULL
++15 node1_committed_during NULL
++16 node1_to_be_committed_after NULL
++17 node1_to_be_committed_after NULL
++18 node1_to_be_committed_after NULL
++19 node1_to_be_committed_after NULL
++20 node1_to_be_committed_after NULL
++26 node2_committed_after NULL
++27 node2_committed_after NULL
++28 node2_committed_after NULL
++29 node2_committed_after NULL
++30 node2_committed_after NULL
++31 node1_to_be_committed_after NULL
++32 node1_to_be_committed_after NULL
++33 node1_to_be_committed_after NULL
++34 node1_to_be_committed_after NULL
++35 node1_to_be_committed_after NULL
++36 node1_committed_after NULL
++37 node1_committed_after NULL
++38 node1_committed_after NULL
++39 node1_committed_after NULL
++40 node1_committed_after NULL
+SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
+COUNT(*) = 0
+1
+DROP TABLE t1;
+COMMIT;
-+SET AUTOCOMMIT=ON;
+SET GLOBAL debug_dbug = $debug_orig;
connection node_1;
CALL mtr.add_suppression("Slave SQL: Error 'The MySQL server is running with the --skip-grant-tables option so it cannot execute this statement' on query");
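The debug-only scenario recorded above kills node_2 while an ALTER is blocked on the sync.alter_opened_table debug sync point; after the SST the results expect three columns in t1, with the added column dumped as f2 = NULL for every pre-existing row. A rough sketch of that DDL leg, where the exact ALTER statement is an assumption (it sits outside the hunks shown) and only the debug_dbug lines are taken verbatim from the result:

SET GLOBAL debug_dbug = 'd,sync.alter_opened_table';  # make the replicated ALTER pause once the table is opened
ALTER TABLE t1 ADD COLUMN f2 INTEGER;  # assumed column definition; existing rows read back as f2 = NULL
SELECT COUNT(*) AS EXPECT_3 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1';  # id, f1, f2
SET GLOBAL debug_dbug = $debug_orig;  # restore the saved debug settings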
diff --git a/mysql-test/suite/galera/r/galera_sst_mysqldump,release.rdiff b/mysql-test/suite/galera/r/galera_sst_mysqldump,release.rdiff
deleted file mode 100644
index 3e8fee1b098..00000000000
--- a/mysql-test/suite/galera/r/galera_sst_mysqldump,release.rdiff
+++ /dev/null
@@ -1,18 +0,0 @@
---- suite/galera/r/galera_sst_mysqldump.result 2018-12-20 14:22:41.730134062 +0100
-+++ suite/galera/r/galera_sst_mysqldump.reject 2019-01-16 22:18:44.139781857 +0100
-@@ -1,3 +1,5 @@
-+connection node_2;
-+connection node_1;
- Setting SST method to mysqldump ...
- call mtr.add_suppression("WSREP: wsrep_sst_method is set to 'mysqldump' yet mysqld bind_address is set to '127.0.0.1'");
- call mtr.add_suppression("Failed to load slave replication state from table mysql.gtid_slave_pos");
-@@ -56,6 +58,9 @@
- INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
- connection node_2;
- Loading wsrep provider ...
-+disconnect node_2;
-+connect node_2, 127.0.0.1, root, , test, $NODE_MYPORT_2;
-+connection node_2;
- SET AUTOCOMMIT=OFF;
- START TRANSACTION;
- INSERT INTO t1 VALUES ('node2_committed_after');
diff --git a/mysql-test/suite/galera/r/galera_sst_mysqldump.result b/mysql-test/suite/galera/r/galera_sst_mysqldump.result
index 4ed679ba477..e63b6f6f98d 100644
--- a/mysql-test/suite/galera/r/galera_sst_mysqldump.result
+++ b/mysql-test/suite/galera/r/galera_sst_mysqldump.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
Setting SST method to mysqldump ...
call mtr.add_suppression("WSREP: wsrep_sst_method is set to 'mysqldump' yet mysqld bind_address is set to '127.0.0.1'");
call mtr.add_suppression("Failed to load slave replication state from table mysql.gtid_slave_pos");
@@ -11,385 +13,691 @@ connection node_1;
connection node_2;
Performing State Transfer on a server that has been temporarily disconnected
connection node_1;
-CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB;
+CREATE TABLE t1 (id int not null primary key,f1 CHAR(255)) ENGINE=InnoDB;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
+INSERT INTO t1 VALUES (1,'node1_committed_before');
+INSERT INTO t1 VALUES (2,'node1_committed_before');
+INSERT INTO t1 VALUES (3,'node1_committed_before');
+INSERT INTO t1 VALUES (4,'node1_committed_before');
+INSERT INTO t1 VALUES (5,'node1_committed_before');
COMMIT;
connection node_2;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
+INSERT INTO t1 VALUES (6,'node2_committed_before');
+INSERT INTO t1 VALUES (7,'node2_committed_before');
+INSERT INTO t1 VALUES (8,'node2_committed_before');
+INSERT INTO t1 VALUES (9,'node2_committed_before');
+INSERT INTO t1 VALUES (10,'node2_committed_before');
COMMIT;
Unloading wsrep provider ...
-SET GLOBAL wsrep_provider = 'none';
+SET GLOBAL wsrep_cluster_address = '';
connection node_1;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
+INSERT INTO t1 VALUES (11,'node1_committed_during');
+INSERT INTO t1 VALUES (12,'node1_committed_during');
+INSERT INTO t1 VALUES (13,'node1_committed_during');
+INSERT INTO t1 VALUES (14,'node1_committed_during');
+INSERT INTO t1 VALUES (15,'node1_committed_during');
COMMIT;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (16,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (17,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (18,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (19,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (20,'node1_to_be_committed_after');
connect node_1a_galera_st_disconnect_slave, 127.0.0.1, root, , test, $NODE_MYPORT_1;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (21,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (22,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (23,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (24,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (25,'node1_to_be_rollbacked_after');
connection node_2;
Loading wsrep provider ...
+disconnect node_2;
+connect node_2, 127.0.0.1, root, , test, $NODE_MYPORT_2;
+connection node_2;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
+INSERT INTO t1 VALUES (26,'node2_committed_after');
+INSERT INTO t1 VALUES (27,'node2_committed_after');
+INSERT INTO t1 VALUES (28,'node2_committed_after');
+INSERT INTO t1 VALUES (29,'node2_committed_after');
+INSERT INTO t1 VALUES (30,'node2_committed_after');
COMMIT;
connection node_1;
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (31,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (32,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (33,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (34,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (35,'node1_to_be_committed_after');
COMMIT;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
+INSERT INTO t1 VALUES (36,'node1_committed_after');
+INSERT INTO t1 VALUES (37,'node1_committed_after');
+INSERT INTO t1 VALUES (38,'node1_committed_after');
+INSERT INTO t1 VALUES (39,'node1_committed_after');
+INSERT INTO t1 VALUES (40,'node1_committed_after');
COMMIT;
connection node_1a_galera_st_disconnect_slave;
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (41,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (42,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (43,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (44,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (45,'node1_to_be_rollbacked_after');
ROLLBACK;
-SELECT COUNT(*) = 35 FROM t1;
-COUNT(*) = 35
-1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_35 FROM t1;
+EXPECT_35
+35
+SELECT * FROM t1;
+id f1
+1 node1_committed_before
+2 node1_committed_before
+3 node1_committed_before
+4 node1_committed_before
+5 node1_committed_before
+6 node2_committed_before
+7 node2_committed_before
+8 node2_committed_before
+9 node2_committed_before
+10 node2_committed_before
+11 node1_committed_during
+12 node1_committed_during
+13 node1_committed_during
+14 node1_committed_during
+15 node1_committed_during
+16 node1_to_be_committed_after
+17 node1_to_be_committed_after
+18 node1_to_be_committed_after
+19 node1_to_be_committed_after
+20 node1_to_be_committed_after
+26 node2_committed_after
+27 node2_committed_after
+28 node2_committed_after
+29 node2_committed_after
+30 node2_committed_after
+31 node1_to_be_committed_after
+32 node1_to_be_committed_after
+33 node1_to_be_committed_after
+34 node1_to_be_committed_after
+35 node1_to_be_committed_after
+36 node1_committed_after
+37 node1_committed_after
+38 node1_committed_after
+39 node1_committed_after
+40 node1_committed_after
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
COUNT(*) = 0
1
-COMMIT;
-SET AUTOCOMMIT=ON;
connection node_1;
-SELECT COUNT(*) = 35 FROM t1;
-COUNT(*) = 35
-1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_35 FROM t1;
+EXPECT_35
+35
+SELECT * FROM t1;
+id f1
+1 node1_committed_before
+2 node1_committed_before
+3 node1_committed_before
+4 node1_committed_before
+5 node1_committed_before
+6 node2_committed_before
+7 node2_committed_before
+8 node2_committed_before
+9 node2_committed_before
+10 node2_committed_before
+11 node1_committed_during
+12 node1_committed_during
+13 node1_committed_during
+14 node1_committed_during
+15 node1_committed_during
+16 node1_to_be_committed_after
+17 node1_to_be_committed_after
+18 node1_to_be_committed_after
+19 node1_to_be_committed_after
+20 node1_to_be_committed_after
+26 node2_committed_after
+27 node2_committed_after
+28 node2_committed_after
+29 node2_committed_after
+30 node2_committed_after
+31 node1_to_be_committed_after
+32 node1_to_be_committed_after
+33 node1_to_be_committed_after
+34 node1_to_be_committed_after
+35 node1_to_be_committed_after
+36 node1_committed_after
+37 node1_committed_after
+38 node1_committed_after
+39 node1_committed_after
+40 node1_committed_after
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
COUNT(*) = 0
1
DROP TABLE t1;
COMMIT;
-SET AUTOCOMMIT=ON;
Performing State Transfer on a server that has been shut down cleanly and restarted
connection node_1;
-CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB;
+CREATE TABLE t1 (id int not null primary key,f1 CHAR(255)) ENGINE=InnoDB;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
+INSERT INTO t1 VALUES (1,'node1_committed_before');
+INSERT INTO t1 VALUES (2,'node1_committed_before');
+INSERT INTO t1 VALUES (3,'node1_committed_before');
+INSERT INTO t1 VALUES (4,'node1_committed_before');
+INSERT INTO t1 VALUES (5,'node1_committed_before');
COMMIT;
connection node_2;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
+INSERT INTO t1 VALUES (6,'node2_committed_before');
+INSERT INTO t1 VALUES (7,'node2_committed_before');
+INSERT INTO t1 VALUES (8,'node2_committed_before');
+INSERT INTO t1 VALUES (9,'node2_committed_before');
+INSERT INTO t1 VALUES (10,'node2_committed_before');
COMMIT;
Shutting down server ...
connection node_1;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
+INSERT INTO t1 VALUES (11,'node1_committed_during');
+INSERT INTO t1 VALUES (12,'node1_committed_during');
+INSERT INTO t1 VALUES (13,'node1_committed_during');
+INSERT INTO t1 VALUES (14,'node1_committed_during');
+INSERT INTO t1 VALUES (15,'node1_committed_during');
COMMIT;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (16,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (17,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (18,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (19,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (20,'node1_to_be_committed_after');
connect node_1a_galera_st_shutdown_slave, 127.0.0.1, root, , test, $NODE_MYPORT_1;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (21,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (22,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (23,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (24,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (25,'node1_to_be_rollbacked_after');
connection node_2;
Starting server ...
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
+INSERT INTO t1 VALUES (26,'node2_committed_after');
+INSERT INTO t1 VALUES (27,'node2_committed_after');
+INSERT INTO t1 VALUES (28,'node2_committed_after');
+INSERT INTO t1 VALUES (29,'node2_committed_after');
+INSERT INTO t1 VALUES (30,'node2_committed_after');
COMMIT;
connection node_1;
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (31,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (32,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (33,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (34,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (35,'node1_to_be_committed_after');
COMMIT;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
+INSERT INTO t1 VALUES (36,'node1_committed_after');
+INSERT INTO t1 VALUES (37,'node1_committed_after');
+INSERT INTO t1 VALUES (38,'node1_committed_after');
+INSERT INTO t1 VALUES (39,'node1_committed_after');
+INSERT INTO t1 VALUES (40,'node1_committed_after');
COMMIT;
connection node_1a_galera_st_shutdown_slave;
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (41,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (42,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (43,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (44,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (45,'node1_to_be_rollbacked_after');
ROLLBACK;
-SELECT COUNT(*) = 35 FROM t1;
-COUNT(*) = 35
-1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_35 FROM t1;
+EXPECT_35
+35
+SELECT * from t1;
+id f1
+1 node1_committed_before
+2 node1_committed_before
+3 node1_committed_before
+4 node1_committed_before
+5 node1_committed_before
+6 node2_committed_before
+7 node2_committed_before
+8 node2_committed_before
+9 node2_committed_before
+10 node2_committed_before
+11 node1_committed_during
+12 node1_committed_during
+13 node1_committed_during
+14 node1_committed_during
+15 node1_committed_during
+16 node1_to_be_committed_after
+17 node1_to_be_committed_after
+18 node1_to_be_committed_after
+19 node1_to_be_committed_after
+20 node1_to_be_committed_after
+26 node2_committed_after
+27 node2_committed_after
+28 node2_committed_after
+29 node2_committed_after
+30 node2_committed_after
+31 node1_to_be_committed_after
+32 node1_to_be_committed_after
+33 node1_to_be_committed_after
+34 node1_to_be_committed_after
+35 node1_to_be_committed_after
+36 node1_committed_after
+37 node1_committed_after
+38 node1_committed_after
+39 node1_committed_after
+40 node1_committed_after
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
COUNT(*) = 0
1
COMMIT;
-SET AUTOCOMMIT=ON;
connection node_1;
-SELECT COUNT(*) = 35 FROM t1;
-COUNT(*) = 35
-1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_35 FROM t1;
+EXPECT_35
+35
+SELECT * from t1;
+id f1
+1 node1_committed_before
+2 node1_committed_before
+3 node1_committed_before
+4 node1_committed_before
+5 node1_committed_before
+6 node2_committed_before
+7 node2_committed_before
+8 node2_committed_before
+9 node2_committed_before
+10 node2_committed_before
+11 node1_committed_during
+12 node1_committed_during
+13 node1_committed_during
+14 node1_committed_during
+15 node1_committed_during
+16 node1_to_be_committed_after
+17 node1_to_be_committed_after
+18 node1_to_be_committed_after
+19 node1_to_be_committed_after
+20 node1_to_be_committed_after
+26 node2_committed_after
+27 node2_committed_after
+28 node2_committed_after
+29 node2_committed_after
+30 node2_committed_after
+31 node1_to_be_committed_after
+32 node1_to_be_committed_after
+33 node1_to_be_committed_after
+34 node1_to_be_committed_after
+35 node1_to_be_committed_after
+36 node1_committed_after
+37 node1_committed_after
+38 node1_committed_after
+39 node1_committed_after
+40 node1_committed_after
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
COUNT(*) = 0
1
DROP TABLE t1;
COMMIT;
-SET AUTOCOMMIT=ON;
Performing State Transfer on a server that starts from a clean var directory
This is accomplished by shutting down node #2 and removing its var directory before restarting it
connection node_1;
-CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB;
+CREATE TABLE t1 (id int not null primary key,f1 CHAR(255)) ENGINE=InnoDB;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
+INSERT INTO t1 VALUES (1,'node1_committed_before');
+INSERT INTO t1 VALUES (2,'node1_committed_before');
+INSERT INTO t1 VALUES (3,'node1_committed_before');
+INSERT INTO t1 VALUES (4,'node1_committed_before');
+INSERT INTO t1 VALUES (5,'node1_committed_before');
COMMIT;
connection node_2;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
+INSERT INTO t1 VALUES (6,'node2_committed_before');
+INSERT INTO t1 VALUES (7,'node2_committed_before');
+INSERT INTO t1 VALUES (8,'node2_committed_before');
+INSERT INTO t1 VALUES (9,'node2_committed_before');
+INSERT INTO t1 VALUES (10,'node2_committed_before');
COMMIT;
Shutting down server ...
connection node_1;
Cleaning var directory ...
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
+INSERT INTO t1 VALUES (11,'node1_committed_during');
+INSERT INTO t1 VALUES (12,'node1_committed_during');
+INSERT INTO t1 VALUES (13,'node1_committed_during');
+INSERT INTO t1 VALUES (14,'node1_committed_during');
+INSERT INTO t1 VALUES (15,'node1_committed_during');
COMMIT;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (16,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (17,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (18,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (19,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (20,'node1_to_be_committed_after');
connect node_1a_galera_st_clean_slave, 127.0.0.1, root, , test, $NODE_MYPORT_1;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (21,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (22,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (23,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (24,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (25,'node1_to_be_rollbacked_after');
connection node_2;
Starting server ...
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
+INSERT INTO t1 VALUES (26,'node2_committed_after');
+INSERT INTO t1 VALUES (27,'node2_committed_after');
+INSERT INTO t1 VALUES (28,'node2_committed_after');
+INSERT INTO t1 VALUES (29,'node2_committed_after');
+INSERT INTO t1 VALUES (30,'node2_committed_after');
COMMIT;
connection node_1;
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (31,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (32,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (33,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (34,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (35,'node1_to_be_committed_after');
COMMIT;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
+INSERT INTO t1 VALUES (36,'node1_committed_after');
+INSERT INTO t1 VALUES (37,'node1_committed_after');
+INSERT INTO t1 VALUES (38,'node1_committed_after');
+INSERT INTO t1 VALUES (39,'node1_committed_after');
+INSERT INTO t1 VALUES (40,'node1_committed_after');
COMMIT;
connection node_1a_galera_st_clean_slave;
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (41,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (42,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (43,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (44,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (45,'node1_to_be_rollbacked_after');
ROLLBACK;
-SELECT COUNT(*) = 35 FROM t1;
-COUNT(*) = 35
-1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_35 FROM t1;
+EXPECT_35
+35
+SELECT * from t1;
+id f1
+1 node1_committed_before
+2 node1_committed_before
+3 node1_committed_before
+4 node1_committed_before
+5 node1_committed_before
+6 node2_committed_before
+7 node2_committed_before
+8 node2_committed_before
+9 node2_committed_before
+10 node2_committed_before
+11 node1_committed_during
+12 node1_committed_during
+13 node1_committed_during
+14 node1_committed_during
+15 node1_committed_during
+16 node1_to_be_committed_after
+17 node1_to_be_committed_after
+18 node1_to_be_committed_after
+19 node1_to_be_committed_after
+20 node1_to_be_committed_after
+26 node2_committed_after
+27 node2_committed_after
+28 node2_committed_after
+29 node2_committed_after
+30 node2_committed_after
+31 node1_to_be_committed_after
+32 node1_to_be_committed_after
+33 node1_to_be_committed_after
+34 node1_to_be_committed_after
+35 node1_to_be_committed_after
+36 node1_committed_after
+37 node1_committed_after
+38 node1_committed_after
+39 node1_committed_after
+40 node1_committed_after
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
COUNT(*) = 0
1
COMMIT;
-SET AUTOCOMMIT=ON;
connection node_1;
-SELECT COUNT(*) = 35 FROM t1;
-COUNT(*) = 35
-1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_35 FROM t1;
+EXPECT_35
+35
+SELECT * from t1;
+id f1
+1 node1_committed_before
+2 node1_committed_before
+3 node1_committed_before
+4 node1_committed_before
+5 node1_committed_before
+6 node2_committed_before
+7 node2_committed_before
+8 node2_committed_before
+9 node2_committed_before
+10 node2_committed_before
+11 node1_committed_during
+12 node1_committed_during
+13 node1_committed_during
+14 node1_committed_during
+15 node1_committed_during
+16 node1_to_be_committed_after
+17 node1_to_be_committed_after
+18 node1_to_be_committed_after
+19 node1_to_be_committed_after
+20 node1_to_be_committed_after
+26 node2_committed_after
+27 node2_committed_after
+28 node2_committed_after
+29 node2_committed_after
+30 node2_committed_after
+31 node1_to_be_committed_after
+32 node1_to_be_committed_after
+33 node1_to_be_committed_after
+34 node1_to_be_committed_after
+35 node1_to_be_committed_after
+36 node1_committed_after
+37 node1_committed_after
+38 node1_committed_after
+39 node1_committed_after
+40 node1_committed_after
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
COUNT(*) = 0
1
DROP TABLE t1;
COMMIT;
-SET AUTOCOMMIT=ON;
Performing State Transfer on a server that has been killed and restarted
connection node_1;
-CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB;
+CREATE TABLE t1 (id int not null primary key,f1 CHAR(255)) ENGINE=InnoDB;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
+INSERT INTO t1 VALUES (1,'node1_committed_before');
+INSERT INTO t1 VALUES (2,'node1_committed_before');
+INSERT INTO t1 VALUES (3,'node1_committed_before');
+INSERT INTO t1 VALUES (4,'node1_committed_before');
+INSERT INTO t1 VALUES (5,'node1_committed_before');
COMMIT;
connection node_2;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
+INSERT INTO t1 VALUES (6,'node2_committed_before');
+INSERT INTO t1 VALUES (7,'node2_committed_before');
+INSERT INTO t1 VALUES (8,'node2_committed_before');
+INSERT INTO t1 VALUES (9,'node2_committed_before');
+INSERT INTO t1 VALUES (10,'node2_committed_before');
COMMIT;
Killing server ...
connection node_1;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
+INSERT INTO t1 VALUES (11,'node1_committed_during');
+INSERT INTO t1 VALUES (12,'node1_committed_during');
+INSERT INTO t1 VALUES (13,'node1_committed_during');
+INSERT INTO t1 VALUES (14,'node1_committed_during');
+INSERT INTO t1 VALUES (15,'node1_committed_during');
COMMIT;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (16,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (17,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (18,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (19,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (20,'node1_to_be_committed_after');
connect node_1a_galera_st_kill_slave, 127.0.0.1, root, , test, $NODE_MYPORT_1;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (21,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (22,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (23,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (24,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (25,'node1_to_be_rollbacked_after');
connection node_2;
Performing --wsrep-recover ...
Starting server ...
Using --wsrep-start-position when starting mysqld ...
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
+INSERT INTO t1 VALUES (26,'node2_committed_after');
+INSERT INTO t1 VALUES (27,'node2_committed_after');
+INSERT INTO t1 VALUES (28,'node2_committed_after');
+INSERT INTO t1 VALUES (29,'node2_committed_after');
+INSERT INTO t1 VALUES (30,'node2_committed_after');
COMMIT;
connection node_1;
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (31,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (32,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (33,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (34,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (35,'node1_to_be_committed_after');
COMMIT;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
+INSERT INTO t1 VALUES (36,'node1_committed_after');
+INSERT INTO t1 VALUES (37,'node1_committed_after');
+INSERT INTO t1 VALUES (38,'node1_committed_after');
+INSERT INTO t1 VALUES (39,'node1_committed_after');
+INSERT INTO t1 VALUES (40,'node1_committed_after');
COMMIT;
connection node_1a_galera_st_kill_slave;
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (41,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (42,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (43,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (45,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (46,'node1_to_be_rollbacked_after');
ROLLBACK;
-SELECT COUNT(*) = 35 FROM t1;
-COUNT(*) = 35
-1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_35 FROM t1;
+EXPECT_35
+35
+SELECT * FROM t1;
+id f1
+1 node1_committed_before
+2 node1_committed_before
+3 node1_committed_before
+4 node1_committed_before
+5 node1_committed_before
+6 node2_committed_before
+7 node2_committed_before
+8 node2_committed_before
+9 node2_committed_before
+10 node2_committed_before
+11 node1_committed_during
+12 node1_committed_during
+13 node1_committed_during
+14 node1_committed_during
+15 node1_committed_during
+16 node1_to_be_committed_after
+17 node1_to_be_committed_after
+18 node1_to_be_committed_after
+19 node1_to_be_committed_after
+20 node1_to_be_committed_after
+26 node2_committed_after
+27 node2_committed_after
+28 node2_committed_after
+29 node2_committed_after
+30 node2_committed_after
+31 node1_to_be_committed_after
+32 node1_to_be_committed_after
+33 node1_to_be_committed_after
+34 node1_to_be_committed_after
+35 node1_to_be_committed_after
+36 node1_committed_after
+37 node1_committed_after
+38 node1_committed_after
+39 node1_committed_after
+40 node1_committed_after
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
COUNT(*) = 0
1
COMMIT;
-SET AUTOCOMMIT=ON;
connection node_1;
-SELECT COUNT(*) = 35 FROM t1;
-COUNT(*) = 35
-1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_35 FROM t1;
+EXPECT_35
+35
+SELECT * FROM t1;
+id f1
+1 node1_committed_before
+2 node1_committed_before
+3 node1_committed_before
+4 node1_committed_before
+5 node1_committed_before
+6 node2_committed_before
+7 node2_committed_before
+8 node2_committed_before
+9 node2_committed_before
+10 node2_committed_before
+11 node1_committed_during
+12 node1_committed_during
+13 node1_committed_during
+14 node1_committed_during
+15 node1_committed_during
+16 node1_to_be_committed_after
+17 node1_to_be_committed_after
+18 node1_to_be_committed_after
+19 node1_to_be_committed_after
+20 node1_to_be_committed_after
+26 node2_committed_after
+27 node2_committed_after
+28 node2_committed_after
+29 node2_committed_after
+30 node2_committed_after
+31 node1_to_be_committed_after
+32 node1_to_be_committed_after
+33 node1_to_be_committed_after
+34 node1_to_be_committed_after
+35 node1_to_be_committed_after
+36 node1_committed_after
+37 node1_committed_after
+38 node1_committed_after
+39 node1_committed_after
+40 node1_committed_after
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
COUNT(*) = 0
1
DROP TABLE t1;
COMMIT;
-SET AUTOCOMMIT=ON;
connection node_1;
CALL mtr.add_suppression("Slave SQL: Error 'The MySQL server is running with the --skip-grant-tables option so it cannot execute this statement' on query");
DROP USER sst;
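Besides the deterministic ids, the mysqldump result now records a different way of detaching node_2 for the "temporarily disconnected" scenario: the node clears wsrep_cluster_address instead of setting wsrep_provider to 'none', and the node_2 client session is re-established after the provider is loaded again. Condensed from the new result above (the connect/disconnect lines are mysqltest commands echoed into the result):

# node_2 leaves the cluster by clearing its cluster address
SET GLOBAL wsrep_cluster_address = '';
# after "Loading wsrep provider ..." the client session is recycled
disconnect node_2;
connect node_2, 127.0.0.1, root, , test, $NODE_MYPORT_2;
connection node_2;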
diff --git a/mysql-test/suite/galera/r/galera_sst_mysqldump_with_key,debug.rdiff b/mysql-test/suite/galera/r/galera_sst_mysqldump_with_key,debug.rdiff
index aee525936d1..7d73f7a6ae6 100644
--- a/mysql-test/suite/galera/r/galera_sst_mysqldump_with_key,debug.rdiff
+++ b/mysql-test/suite/galera/r/galera_sst_mysqldump_with_key,debug.rdiff
@@ -1,27 +1,27 @@
---- r/galera_sst_mysqldump_with_key.result 2019-07-04 09:39:54.993971174 +0300
-+++ r/galera_sst_mysqldump_with_key.reject 2019-07-04 09:55:34.171175305 +0300
-@@ -204,6 +204,114 @@
+--- r/galera_sst_mysqldump_with_key.result 2021-04-10 14:33:29.441606621 +0300
++++ r/galera_sst_mysqldump_with_key,debug.reject 2021-04-10 15:02:45.367881573 +0300
+@@ -358,6 +358,190 @@
+ 1
DROP TABLE t1;
COMMIT;
- SET AUTOCOMMIT=ON;
+Performing State Transfer on a server that has been killed and restarted
+while a DDL was in progress on it
+connection node_1;
-+CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB;
++CREATE TABLE t1 (id int not null primary key,f1 CHAR(255)) ENGINE=InnoDB;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
-+INSERT INTO t1 VALUES ('node1_committed_before');
-+INSERT INTO t1 VALUES ('node1_committed_before');
-+INSERT INTO t1 VALUES ('node1_committed_before');
-+INSERT INTO t1 VALUES ('node1_committed_before');
-+INSERT INTO t1 VALUES ('node1_committed_before');
++INSERT INTO t1 VALUES (1,'node1_committed_before');
++INSERT INTO t1 VALUES (2,'node1_committed_before');
++INSERT INTO t1 VALUES (3,'node1_committed_before');
++INSERT INTO t1 VALUES (4,'node1_committed_before');
++INSERT INTO t1 VALUES (5,'node1_committed_before');
+connection node_2;
+START TRANSACTION;
-+INSERT INTO t1 VALUES ('node2_committed_before');
-+INSERT INTO t1 VALUES ('node2_committed_before');
-+INSERT INTO t1 VALUES ('node2_committed_before');
-+INSERT INTO t1 VALUES ('node2_committed_before');
-+INSERT INTO t1 VALUES ('node2_committed_before');
++INSERT INTO t1 VALUES (6,'node2_committed_before');
++INSERT INTO t1 VALUES (7,'node2_committed_before');
++INSERT INTO t1 VALUES (8,'node2_committed_before');
++INSERT INTO t1 VALUES (9,'node2_committed_before');
++INSERT INTO t1 VALUES (10,'node2_committed_before');
+COMMIT;
+SET GLOBAL debug_dbug = 'd,sync.alter_opened_table';
+connection node_1;
@@ -32,26 +32,26 @@
+connection node_1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
-+INSERT INTO t1 (f1) VALUES ('node1_committed_during');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_during');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_during');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_during');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_during');
++INSERT INTO t1 (id,f1) VALUES (11,'node1_committed_during');
++INSERT INTO t1 (id,f1) VALUES (12,'node1_committed_during');
++INSERT INTO t1 (id,f1) VALUES (13,'node1_committed_during');
++INSERT INTO t1 (id,f1) VALUES (14,'node1_committed_during');
++INSERT INTO t1 (id,f1) VALUES (15,'node1_committed_during');
+COMMIT;
+START TRANSACTION;
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (16,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (17,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (18,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (19,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (20,'node1_to_be_committed_after');
+connect node_1a_galera_st_kill_slave_ddl, 127.0.0.1, root, , test, $NODE_MYPORT_1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (21,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (22,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (23,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (24,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (25,'node1_to_be_rollbacked_after');
+connection node_2;
+Performing --wsrep-recover ...
+connection node_2;
@@ -59,58 +59,134 @@
+Using --wsrep-start-position when starting mysqld ...
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
-+INSERT INTO t1 (f1) VALUES ('node2_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node2_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node2_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node2_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node2_committed_after');
++INSERT INTO t1 (id,f1) VALUES (26,'node2_committed_after');
++INSERT INTO t1 (id,f1) VALUES (27,'node2_committed_after');
++INSERT INTO t1 (id,f1) VALUES (28,'node2_committed_after');
++INSERT INTO t1 (id,f1) VALUES (29,'node2_committed_after');
++INSERT INTO t1 (id,f1) VALUES (30,'node2_committed_after');
+COMMIT;
+connection node_1;
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (31,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (32,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (33,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (34,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (35,'node1_to_be_committed_after');
+COMMIT;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
-+INSERT INTO t1 (f1) VALUES ('node1_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_after');
++INSERT INTO t1 (id,f1) VALUES (36,'node1_committed_after');
++INSERT INTO t1 (id,f1) VALUES (37,'node1_committed_after');
++INSERT INTO t1 (id,f1) VALUES (38,'node1_committed_after');
++INSERT INTO t1 (id,f1) VALUES (39,'node1_committed_after');
++INSERT INTO t1 (id,f1) VALUES (40,'node1_committed_after');
+COMMIT;
+connection node_1a_galera_st_kill_slave_ddl;
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (41,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (42,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (43,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (44,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (45,'node1_to_be_rollbacked_after');
+ROLLBACK;
-+SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1';
-+COUNT(*) = 2
-+1
-+SELECT COUNT(*) = 35 FROM t1;
-+COUNT(*) = 35
-+1
++SET AUTOCOMMIT=ON;
++SET SESSION wsrep_sync_wait=15;
++SELECT COUNT(*) AS EXPECT_3 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1';
++EXPECT_3
++3
++SELECT COUNT(*) AS EXPECT_35 FROM t1;
++EXPECT_35
++35
++SELECT * FROM t1;
++id f1 f2
++1 node1_committed_before NULL
++2 node1_committed_before NULL
++3 node1_committed_before NULL
++4 node1_committed_before NULL
++5 node1_committed_before NULL
++6 node2_committed_before NULL
++7 node2_committed_before NULL
++8 node2_committed_before NULL
++9 node2_committed_before NULL
++10 node2_committed_before NULL
++11 node1_committed_during NULL
++12 node1_committed_during NULL
++13 node1_committed_during NULL
++14 node1_committed_during NULL
++15 node1_committed_during NULL
++16 node1_to_be_committed_after NULL
++17 node1_to_be_committed_after NULL
++18 node1_to_be_committed_after NULL
++19 node1_to_be_committed_after NULL
++20 node1_to_be_committed_after NULL
++26 node2_committed_after NULL
++27 node2_committed_after NULL
++28 node2_committed_after NULL
++29 node2_committed_after NULL
++30 node2_committed_after NULL
++31 node1_to_be_committed_after NULL
++32 node1_to_be_committed_after NULL
++33 node1_to_be_committed_after NULL
++34 node1_to_be_committed_after NULL
++35 node1_to_be_committed_after NULL
++36 node1_committed_after NULL
++37 node1_committed_after NULL
++38 node1_committed_after NULL
++39 node1_committed_after NULL
++40 node1_committed_after NULL
+SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
+COUNT(*) = 0
+1
+COMMIT;
-+SET AUTOCOMMIT=ON;
+connection node_1;
-+SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1';
-+COUNT(*) = 2
-+1
-+SELECT COUNT(*) = 35 FROM t1;
-+COUNT(*) = 35
-+1
++SET AUTOCOMMIT=ON;
++SET SESSION wsrep_sync_wait=15;
++SELECT COUNT(*) AS EXPECT_3 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1';
++EXPECT_3
++3
++SELECT COUNT(*) AS EXPECT_35 FROM t1;
++EXPECT_35
++35
++SELECT * FROM t1;
++id f1 f2
++1 node1_committed_before NULL
++2 node1_committed_before NULL
++3 node1_committed_before NULL
++4 node1_committed_before NULL
++5 node1_committed_before NULL
++6 node2_committed_before NULL
++7 node2_committed_before NULL
++8 node2_committed_before NULL
++9 node2_committed_before NULL
++10 node2_committed_before NULL
++11 node1_committed_during NULL
++12 node1_committed_during NULL
++13 node1_committed_during NULL
++14 node1_committed_during NULL
++15 node1_committed_during NULL
++16 node1_to_be_committed_after NULL
++17 node1_to_be_committed_after NULL
++18 node1_to_be_committed_after NULL
++19 node1_to_be_committed_after NULL
++20 node1_to_be_committed_after NULL
++26 node2_committed_after NULL
++27 node2_committed_after NULL
++28 node2_committed_after NULL
++29 node2_committed_after NULL
++30 node2_committed_after NULL
++31 node1_to_be_committed_after NULL
++32 node1_to_be_committed_after NULL
++33 node1_to_be_committed_after NULL
++34 node1_to_be_committed_after NULL
++35 node1_to_be_committed_after NULL
++36 node1_committed_after NULL
++37 node1_committed_after NULL
++38 node1_committed_after NULL
++39 node1_committed_after NULL
++40 node1_committed_after NULL
+SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
+COUNT(*) = 0
+1
+DROP TABLE t1;
+COMMIT;
-+SET AUTOCOMMIT=ON;
+SET GLOBAL debug_dbug = $debug_orig;
connection node_1;
CALL mtr.add_suppression("Slave SQL: Error 'The MySQL server is running with the --skip-grant-tables option so it cannot execute this statement' on query");
diff --git a/mysql-test/suite/galera/r/galera_sst_mysqldump_with_key.result b/mysql-test/suite/galera/r/galera_sst_mysqldump_with_key.result
index 21b912ac222..fcb250f02ce 100644
--- a/mysql-test/suite/galera/r/galera_sst_mysqldump_with_key.result
+++ b/mysql-test/suite/galera/r/galera_sst_mysqldump_with_key.result
@@ -18,194 +18,346 @@ GRANT USAGE ON *.* TO sslsst REQUIRE SSL;
SET GLOBAL wsrep_sst_auth = 'sslsst:';
Performing State Transfer on a server that has been shut down cleanly and restarted
connection node_1;
-CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB;
+CREATE TABLE t1 (id int not null primary key,f1 CHAR(255)) ENGINE=InnoDB;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
+INSERT INTO t1 VALUES (1,'node1_committed_before');
+INSERT INTO t1 VALUES (2,'node1_committed_before');
+INSERT INTO t1 VALUES (3,'node1_committed_before');
+INSERT INTO t1 VALUES (4,'node1_committed_before');
+INSERT INTO t1 VALUES (5,'node1_committed_before');
COMMIT;
connection node_2;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
+INSERT INTO t1 VALUES (6,'node2_committed_before');
+INSERT INTO t1 VALUES (7,'node2_committed_before');
+INSERT INTO t1 VALUES (8,'node2_committed_before');
+INSERT INTO t1 VALUES (9,'node2_committed_before');
+INSERT INTO t1 VALUES (10,'node2_committed_before');
COMMIT;
Shutting down server ...
connection node_1;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
+INSERT INTO t1 VALUES (11,'node1_committed_during');
+INSERT INTO t1 VALUES (12,'node1_committed_during');
+INSERT INTO t1 VALUES (13,'node1_committed_during');
+INSERT INTO t1 VALUES (14,'node1_committed_during');
+INSERT INTO t1 VALUES (15,'node1_committed_during');
COMMIT;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (16,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (17,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (18,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (19,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (20,'node1_to_be_committed_after');
connect node_1a_galera_st_shutdown_slave, 127.0.0.1, root, , test, $NODE_MYPORT_1;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (21,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (22,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (23,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (24,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (25,'node1_to_be_rollbacked_after');
connection node_2;
Starting server ...
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
+INSERT INTO t1 VALUES (26,'node2_committed_after');
+INSERT INTO t1 VALUES (27,'node2_committed_after');
+INSERT INTO t1 VALUES (28,'node2_committed_after');
+INSERT INTO t1 VALUES (29,'node2_committed_after');
+INSERT INTO t1 VALUES (30,'node2_committed_after');
COMMIT;
connection node_1;
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (31,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (32,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (33,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (34,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (35,'node1_to_be_committed_after');
COMMIT;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
+INSERT INTO t1 VALUES (36,'node1_committed_after');
+INSERT INTO t1 VALUES (37,'node1_committed_after');
+INSERT INTO t1 VALUES (38,'node1_committed_after');
+INSERT INTO t1 VALUES (39,'node1_committed_after');
+INSERT INTO t1 VALUES (40,'node1_committed_after');
COMMIT;
connection node_1a_galera_st_shutdown_slave;
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (41,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (42,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (43,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (44,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (45,'node1_to_be_rollbacked_after');
ROLLBACK;
-SELECT COUNT(*) = 35 FROM t1;
-COUNT(*) = 35
-1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_15 FROM t1;
+EXPECT_15
+35
+SELECT * from t1;
+id f1
+1 node1_committed_before
+2 node1_committed_before
+3 node1_committed_before
+4 node1_committed_before
+5 node1_committed_before
+6 node2_committed_before
+7 node2_committed_before
+8 node2_committed_before
+9 node2_committed_before
+10 node2_committed_before
+11 node1_committed_during
+12 node1_committed_during
+13 node1_committed_during
+14 node1_committed_during
+15 node1_committed_during
+16 node1_to_be_committed_after
+17 node1_to_be_committed_after
+18 node1_to_be_committed_after
+19 node1_to_be_committed_after
+20 node1_to_be_committed_after
+26 node2_committed_after
+27 node2_committed_after
+28 node2_committed_after
+29 node2_committed_after
+30 node2_committed_after
+31 node1_to_be_committed_after
+32 node1_to_be_committed_after
+33 node1_to_be_committed_after
+34 node1_to_be_committed_after
+35 node1_to_be_committed_after
+36 node1_committed_after
+37 node1_committed_after
+38 node1_committed_after
+39 node1_committed_after
+40 node1_committed_after
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
COUNT(*) = 0
1
COMMIT;
-SET AUTOCOMMIT=ON;
connection node_1;
-SELECT COUNT(*) = 35 FROM t1;
-COUNT(*) = 35
-1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_15 FROM t1;
+EXPECT_15
+35
+SELECT * from t1;
+id f1
+1 node1_committed_before
+2 node1_committed_before
+3 node1_committed_before
+4 node1_committed_before
+5 node1_committed_before
+6 node2_committed_before
+7 node2_committed_before
+8 node2_committed_before
+9 node2_committed_before
+10 node2_committed_before
+11 node1_committed_during
+12 node1_committed_during
+13 node1_committed_during
+14 node1_committed_during
+15 node1_committed_during
+16 node1_to_be_committed_after
+17 node1_to_be_committed_after
+18 node1_to_be_committed_after
+19 node1_to_be_committed_after
+20 node1_to_be_committed_after
+26 node2_committed_after
+27 node2_committed_after
+28 node2_committed_after
+29 node2_committed_after
+30 node2_committed_after
+31 node1_to_be_committed_after
+32 node1_to_be_committed_after
+33 node1_to_be_committed_after
+34 node1_to_be_committed_after
+35 node1_to_be_committed_after
+36 node1_committed_after
+37 node1_committed_after
+38 node1_committed_after
+39 node1_committed_after
+40 node1_committed_after
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
COUNT(*) = 0
1
DROP TABLE t1;
COMMIT;
-SET AUTOCOMMIT=ON;
Performing State Transfer on a server that has been killed and restarted
connection node_1;
-CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB;
+CREATE TABLE t1 (id int not null primary key,f1 CHAR(255)) ENGINE=InnoDB;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
+INSERT INTO t1 VALUES (1,'node1_committed_before');
+INSERT INTO t1 VALUES (2,'node1_committed_before');
+INSERT INTO t1 VALUES (3,'node1_committed_before');
+INSERT INTO t1 VALUES (4,'node1_committed_before');
+INSERT INTO t1 VALUES (5,'node1_committed_before');
COMMIT;
connection node_2;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
+INSERT INTO t1 VALUES (6,'node2_committed_before');
+INSERT INTO t1 VALUES (7,'node2_committed_before');
+INSERT INTO t1 VALUES (8,'node2_committed_before');
+INSERT INTO t1 VALUES (9,'node2_committed_before');
+INSERT INTO t1 VALUES (10,'node2_committed_before');
COMMIT;
Killing server ...
connection node_1;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
+INSERT INTO t1 VALUES (11,'node1_committed_during');
+INSERT INTO t1 VALUES (12,'node1_committed_during');
+INSERT INTO t1 VALUES (13,'node1_committed_during');
+INSERT INTO t1 VALUES (14,'node1_committed_during');
+INSERT INTO t1 VALUES (15,'node1_committed_during');
COMMIT;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (16,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (17,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (18,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (19,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (20,'node1_to_be_committed_after');
connect node_1a_galera_st_kill_slave, 127.0.0.1, root, , test, $NODE_MYPORT_1;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (21,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (22,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (23,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (24,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (25,'node1_to_be_rollbacked_after');
connection node_2;
Performing --wsrep-recover ...
Starting server ...
Using --wsrep-start-position when starting mysqld ...
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
+INSERT INTO t1 VALUES (26,'node2_committed_after');
+INSERT INTO t1 VALUES (27,'node2_committed_after');
+INSERT INTO t1 VALUES (28,'node2_committed_after');
+INSERT INTO t1 VALUES (29,'node2_committed_after');
+INSERT INTO t1 VALUES (30,'node2_committed_after');
COMMIT;
connection node_1;
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (31,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (32,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (33,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (34,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (35,'node1_to_be_committed_after');
COMMIT;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
+INSERT INTO t1 VALUES (36,'node1_committed_after');
+INSERT INTO t1 VALUES (37,'node1_committed_after');
+INSERT INTO t1 VALUES (38,'node1_committed_after');
+INSERT INTO t1 VALUES (39,'node1_committed_after');
+INSERT INTO t1 VALUES (40,'node1_committed_after');
COMMIT;
connection node_1a_galera_st_kill_slave;
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (41,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (42,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (43,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (45,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (46,'node1_to_be_rollbacked_after');
ROLLBACK;
-SELECT COUNT(*) = 35 FROM t1;
-COUNT(*) = 35
-1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_35 FROM t1;
+EXPECT_35
+35
+SELECT * FROM t1;
+id f1
+1 node1_committed_before
+2 node1_committed_before
+3 node1_committed_before
+4 node1_committed_before
+5 node1_committed_before
+6 node2_committed_before
+7 node2_committed_before
+8 node2_committed_before
+9 node2_committed_before
+10 node2_committed_before
+11 node1_committed_during
+12 node1_committed_during
+13 node1_committed_during
+14 node1_committed_during
+15 node1_committed_during
+16 node1_to_be_committed_after
+17 node1_to_be_committed_after
+18 node1_to_be_committed_after
+19 node1_to_be_committed_after
+20 node1_to_be_committed_after
+26 node2_committed_after
+27 node2_committed_after
+28 node2_committed_after
+29 node2_committed_after
+30 node2_committed_after
+31 node1_to_be_committed_after
+32 node1_to_be_committed_after
+33 node1_to_be_committed_after
+34 node1_to_be_committed_after
+35 node1_to_be_committed_after
+36 node1_committed_after
+37 node1_committed_after
+38 node1_committed_after
+39 node1_committed_after
+40 node1_committed_after
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
COUNT(*) = 0
1
COMMIT;
-SET AUTOCOMMIT=ON;
connection node_1;
-SELECT COUNT(*) = 35 FROM t1;
-COUNT(*) = 35
-1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_35 FROM t1;
+EXPECT_35
+35
+SELECT * FROM t1;
+id f1
+1 node1_committed_before
+2 node1_committed_before
+3 node1_committed_before
+4 node1_committed_before
+5 node1_committed_before
+6 node2_committed_before
+7 node2_committed_before
+8 node2_committed_before
+9 node2_committed_before
+10 node2_committed_before
+11 node1_committed_during
+12 node1_committed_during
+13 node1_committed_during
+14 node1_committed_during
+15 node1_committed_during
+16 node1_to_be_committed_after
+17 node1_to_be_committed_after
+18 node1_to_be_committed_after
+19 node1_to_be_committed_after
+20 node1_to_be_committed_after
+26 node2_committed_after
+27 node2_committed_after
+28 node2_committed_after
+29 node2_committed_after
+30 node2_committed_after
+31 node1_to_be_committed_after
+32 node1_to_be_committed_after
+33 node1_to_be_committed_after
+34 node1_to_be_committed_after
+35 node1_to_be_committed_after
+36 node1_committed_after
+37 node1_committed_after
+38 node1_committed_after
+39 node1_committed_after
+40 node1_committed_after
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
COUNT(*) = 0
1
DROP TABLE t1;
COMMIT;
-SET AUTOCOMMIT=ON;
connection node_1;
CALL mtr.add_suppression("Slave SQL: Error 'The MySQL server is running with the --skip-grant-tables option so it cannot execute this statement' on query");
DROP USER sst;
diff --git a/mysql-test/suite/galera/r/galera_sst_rsync,debug.rdiff b/mysql-test/suite/galera/r/galera_sst_rsync,debug.rdiff
index 94dd8c2e502..c4937bfb4ca 100644
--- a/mysql-test/suite/galera/r/galera_sst_rsync,debug.rdiff
+++ b/mysql-test/suite/galera/r/galera_sst_rsync,debug.rdiff
@@ -1,27 +1,27 @@
---- galera_sst_rsync.result
-+++ galera_sst_rsync,debug.reject
-@@ -284,3 +284,111 @@
+--- r/galera_sst_rsync_data_dir.result 2021-04-10 14:35:28.090610315 +0300
++++ r/galera_sst_rsync_data_dir,debug.reject 2021-04-10 15:41:44.876068411 +0300
+@@ -516,3 +516,187 @@
+ 1
DROP TABLE t1;
COMMIT;
- SET AUTOCOMMIT=ON;
+Performing State Transfer on a server that has been killed and restarted
+while a DDL was in progress on it
+connection node_1;
-+CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB;
++CREATE TABLE t1 (id int not null primary key,f1 CHAR(255)) ENGINE=InnoDB;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
-+INSERT INTO t1 VALUES ('node1_committed_before');
-+INSERT INTO t1 VALUES ('node1_committed_before');
-+INSERT INTO t1 VALUES ('node1_committed_before');
-+INSERT INTO t1 VALUES ('node1_committed_before');
-+INSERT INTO t1 VALUES ('node1_committed_before');
++INSERT INTO t1 VALUES (1,'node1_committed_before');
++INSERT INTO t1 VALUES (2,'node1_committed_before');
++INSERT INTO t1 VALUES (3,'node1_committed_before');
++INSERT INTO t1 VALUES (4,'node1_committed_before');
++INSERT INTO t1 VALUES (5,'node1_committed_before');
+connection node_2;
+START TRANSACTION;
-+INSERT INTO t1 VALUES ('node2_committed_before');
-+INSERT INTO t1 VALUES ('node2_committed_before');
-+INSERT INTO t1 VALUES ('node2_committed_before');
-+INSERT INTO t1 VALUES ('node2_committed_before');
-+INSERT INTO t1 VALUES ('node2_committed_before');
++INSERT INTO t1 VALUES (6,'node2_committed_before');
++INSERT INTO t1 VALUES (7,'node2_committed_before');
++INSERT INTO t1 VALUES (8,'node2_committed_before');
++INSERT INTO t1 VALUES (9,'node2_committed_before');
++INSERT INTO t1 VALUES (10,'node2_committed_before');
+COMMIT;
+SET GLOBAL debug_dbug = 'd,sync.alter_opened_table';
+connection node_1;
@@ -32,26 +32,26 @@
+connection node_1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
-+INSERT INTO t1 (f1) VALUES ('node1_committed_during');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_during');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_during');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_during');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_during');
++INSERT INTO t1 (id,f1) VALUES (11,'node1_committed_during');
++INSERT INTO t1 (id,f1) VALUES (12,'node1_committed_during');
++INSERT INTO t1 (id,f1) VALUES (13,'node1_committed_during');
++INSERT INTO t1 (id,f1) VALUES (14,'node1_committed_during');
++INSERT INTO t1 (id,f1) VALUES (15,'node1_committed_during');
+COMMIT;
+START TRANSACTION;
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (16,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (17,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (18,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (19,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (20,'node1_to_be_committed_after');
+connect node_1a_galera_st_kill_slave_ddl, 127.0.0.1, root, , test, $NODE_MYPORT_1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (21,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (22,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (23,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (24,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (25,'node1_to_be_rollbacked_after');
+connection node_2;
+Performing --wsrep-recover ...
+connection node_2;
@@ -59,56 +59,132 @@
+Using --wsrep-start-position when starting mysqld ...
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
-+INSERT INTO t1 (f1) VALUES ('node2_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node2_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node2_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node2_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node2_committed_after');
++INSERT INTO t1 (id,f1) VALUES (26,'node2_committed_after');
++INSERT INTO t1 (id,f1) VALUES (27,'node2_committed_after');
++INSERT INTO t1 (id,f1) VALUES (28,'node2_committed_after');
++INSERT INTO t1 (id,f1) VALUES (29,'node2_committed_after');
++INSERT INTO t1 (id,f1) VALUES (30,'node2_committed_after');
+COMMIT;
+connection node_1;
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (31,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (32,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (33,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (34,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (35,'node1_to_be_committed_after');
+COMMIT;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
-+INSERT INTO t1 (f1) VALUES ('node1_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_after');
++INSERT INTO t1 (id,f1) VALUES (36,'node1_committed_after');
++INSERT INTO t1 (id,f1) VALUES (37,'node1_committed_after');
++INSERT INTO t1 (id,f1) VALUES (38,'node1_committed_after');
++INSERT INTO t1 (id,f1) VALUES (39,'node1_committed_after');
++INSERT INTO t1 (id,f1) VALUES (40,'node1_committed_after');
+COMMIT;
+connection node_1a_galera_st_kill_slave_ddl;
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (41,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (42,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (43,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (44,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (45,'node1_to_be_rollbacked_after');
+ROLLBACK;
-+SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1';
-+COUNT(*) = 2
-+1
-+SELECT COUNT(*) = 35 FROM t1;
-+COUNT(*) = 35
-+1
++SET AUTOCOMMIT=ON;
++SET SESSION wsrep_sync_wait=15;
++SELECT COUNT(*) AS EXPECT_3 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1';
++EXPECT_3
++3
++SELECT COUNT(*) AS EXPECT_35 FROM t1;
++EXPECT_35
++35
++SELECT * FROM t1;
++id f1 f2
++1 node1_committed_before NULL
++2 node1_committed_before NULL
++3 node1_committed_before NULL
++4 node1_committed_before NULL
++5 node1_committed_before NULL
++6 node2_committed_before NULL
++7 node2_committed_before NULL
++8 node2_committed_before NULL
++9 node2_committed_before NULL
++10 node2_committed_before NULL
++11 node1_committed_during NULL
++12 node1_committed_during NULL
++13 node1_committed_during NULL
++14 node1_committed_during NULL
++15 node1_committed_during NULL
++16 node1_to_be_committed_after NULL
++17 node1_to_be_committed_after NULL
++18 node1_to_be_committed_after NULL
++19 node1_to_be_committed_after NULL
++20 node1_to_be_committed_after NULL
++26 node2_committed_after NULL
++27 node2_committed_after NULL
++28 node2_committed_after NULL
++29 node2_committed_after NULL
++30 node2_committed_after NULL
++31 node1_to_be_committed_after NULL
++32 node1_to_be_committed_after NULL
++33 node1_to_be_committed_after NULL
++34 node1_to_be_committed_after NULL
++35 node1_to_be_committed_after NULL
++36 node1_committed_after NULL
++37 node1_committed_after NULL
++38 node1_committed_after NULL
++39 node1_committed_after NULL
++40 node1_committed_after NULL
+SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
+COUNT(*) = 0
+1
+COMMIT;
-+SET AUTOCOMMIT=ON;
+connection node_1;
-+SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1';
-+COUNT(*) = 2
-+1
-+SELECT COUNT(*) = 35 FROM t1;
-+COUNT(*) = 35
-+1
++SET AUTOCOMMIT=ON;
++SET SESSION wsrep_sync_wait=15;
++SELECT COUNT(*) AS EXPECT_3 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1';
++EXPECT_3
++3
++SELECT COUNT(*) AS EXPECT_35 FROM t1;
++EXPECT_35
++35
++SELECT * FROM t1;
++id f1 f2
++1 node1_committed_before NULL
++2 node1_committed_before NULL
++3 node1_committed_before NULL
++4 node1_committed_before NULL
++5 node1_committed_before NULL
++6 node2_committed_before NULL
++7 node2_committed_before NULL
++8 node2_committed_before NULL
++9 node2_committed_before NULL
++10 node2_committed_before NULL
++11 node1_committed_during NULL
++12 node1_committed_during NULL
++13 node1_committed_during NULL
++14 node1_committed_during NULL
++15 node1_committed_during NULL
++16 node1_to_be_committed_after NULL
++17 node1_to_be_committed_after NULL
++18 node1_to_be_committed_after NULL
++19 node1_to_be_committed_after NULL
++20 node1_to_be_committed_after NULL
++26 node2_committed_after NULL
++27 node2_committed_after NULL
++28 node2_committed_after NULL
++29 node2_committed_after NULL
++30 node2_committed_after NULL
++31 node1_to_be_committed_after NULL
++32 node1_to_be_committed_after NULL
++33 node1_to_be_committed_after NULL
++34 node1_to_be_committed_after NULL
++35 node1_to_be_committed_after NULL
++36 node1_committed_after NULL
++37 node1_committed_after NULL
++38 node1_committed_after NULL
++39 node1_committed_after NULL
++40 node1_committed_after NULL
+SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
+COUNT(*) = 0
+1
+DROP TABLE t1;
+COMMIT;
-+SET AUTOCOMMIT=ON;
+SET GLOBAL debug_dbug = $debug_orig;
diff --git a/mysql-test/suite/galera/r/galera_sst_rsync.result b/mysql-test/suite/galera/r/galera_sst_rsync.result
index d41d0d34e75..8b531bc6a3f 100644
--- a/mysql-test/suite/galera/r/galera_sst_rsync.result
+++ b/mysql-test/suite/galera/r/galera_sst_rsync.result
@@ -4,287 +4,515 @@ connection node_1;
connection node_2;
Performing State Transfer on a server that has been shut down cleanly and restarted
connection node_1;
-CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB;
+CREATE TABLE t1 (id int not null primary key,f1 CHAR(255)) ENGINE=InnoDB;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
+INSERT INTO t1 VALUES (1,'node1_committed_before');
+INSERT INTO t1 VALUES (2,'node1_committed_before');
+INSERT INTO t1 VALUES (3,'node1_committed_before');
+INSERT INTO t1 VALUES (4,'node1_committed_before');
+INSERT INTO t1 VALUES (5,'node1_committed_before');
COMMIT;
connection node_2;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
+INSERT INTO t1 VALUES (6,'node2_committed_before');
+INSERT INTO t1 VALUES (7,'node2_committed_before');
+INSERT INTO t1 VALUES (8,'node2_committed_before');
+INSERT INTO t1 VALUES (9,'node2_committed_before');
+INSERT INTO t1 VALUES (10,'node2_committed_before');
COMMIT;
Shutting down server ...
connection node_1;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
+INSERT INTO t1 VALUES (11,'node1_committed_during');
+INSERT INTO t1 VALUES (12,'node1_committed_during');
+INSERT INTO t1 VALUES (13,'node1_committed_during');
+INSERT INTO t1 VALUES (14,'node1_committed_during');
+INSERT INTO t1 VALUES (15,'node1_committed_during');
COMMIT;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (16,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (17,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (18,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (19,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (20,'node1_to_be_committed_after');
connect node_1a_galera_st_shutdown_slave, 127.0.0.1, root, , test, $NODE_MYPORT_1;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (21,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (22,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (23,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (24,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (25,'node1_to_be_rollbacked_after');
connection node_2;
Starting server ...
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
+INSERT INTO t1 VALUES (26,'node2_committed_after');
+INSERT INTO t1 VALUES (27,'node2_committed_after');
+INSERT INTO t1 VALUES (28,'node2_committed_after');
+INSERT INTO t1 VALUES (29,'node2_committed_after');
+INSERT INTO t1 VALUES (30,'node2_committed_after');
COMMIT;
connection node_1;
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (31,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (32,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (33,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (34,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (35,'node1_to_be_committed_after');
COMMIT;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
+INSERT INTO t1 VALUES (36,'node1_committed_after');
+INSERT INTO t1 VALUES (37,'node1_committed_after');
+INSERT INTO t1 VALUES (38,'node1_committed_after');
+INSERT INTO t1 VALUES (39,'node1_committed_after');
+INSERT INTO t1 VALUES (40,'node1_committed_after');
COMMIT;
connection node_1a_galera_st_shutdown_slave;
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (41,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (42,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (43,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (44,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (45,'node1_to_be_rollbacked_after');
ROLLBACK;
-SELECT COUNT(*) = 35 FROM t1;
-COUNT(*) = 35
-1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_15 FROM t1;
+EXPECT_15
+35
+SELECT * from t1;
+id f1
+1 node1_committed_before
+2 node1_committed_before
+3 node1_committed_before
+4 node1_committed_before
+5 node1_committed_before
+6 node2_committed_before
+7 node2_committed_before
+8 node2_committed_before
+9 node2_committed_before
+10 node2_committed_before
+11 node1_committed_during
+12 node1_committed_during
+13 node1_committed_during
+14 node1_committed_during
+15 node1_committed_during
+16 node1_to_be_committed_after
+17 node1_to_be_committed_after
+18 node1_to_be_committed_after
+19 node1_to_be_committed_after
+20 node1_to_be_committed_after
+26 node2_committed_after
+27 node2_committed_after
+28 node2_committed_after
+29 node2_committed_after
+30 node2_committed_after
+31 node1_to_be_committed_after
+32 node1_to_be_committed_after
+33 node1_to_be_committed_after
+34 node1_to_be_committed_after
+35 node1_to_be_committed_after
+36 node1_committed_after
+37 node1_committed_after
+38 node1_committed_after
+39 node1_committed_after
+40 node1_committed_after
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
COUNT(*) = 0
1
COMMIT;
-SET AUTOCOMMIT=ON;
connection node_1;
-SELECT COUNT(*) = 35 FROM t1;
-COUNT(*) = 35
-1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_15 FROM t1;
+EXPECT_15
+35
+SELECT * from t1;
+id f1
+1 node1_committed_before
+2 node1_committed_before
+3 node1_committed_before
+4 node1_committed_before
+5 node1_committed_before
+6 node2_committed_before
+7 node2_committed_before
+8 node2_committed_before
+9 node2_committed_before
+10 node2_committed_before
+11 node1_committed_during
+12 node1_committed_during
+13 node1_committed_during
+14 node1_committed_during
+15 node1_committed_during
+16 node1_to_be_committed_after
+17 node1_to_be_committed_after
+18 node1_to_be_committed_after
+19 node1_to_be_committed_after
+20 node1_to_be_committed_after
+26 node2_committed_after
+27 node2_committed_after
+28 node2_committed_after
+29 node2_committed_after
+30 node2_committed_after
+31 node1_to_be_committed_after
+32 node1_to_be_committed_after
+33 node1_to_be_committed_after
+34 node1_to_be_committed_after
+35 node1_to_be_committed_after
+36 node1_committed_after
+37 node1_committed_after
+38 node1_committed_after
+39 node1_committed_after
+40 node1_committed_after
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
COUNT(*) = 0
1
DROP TABLE t1;
COMMIT;
-SET AUTOCOMMIT=ON;
Performing State Transfer on a server that starts from a clean var directory
This is accomplished by shutting down node #2 and removing its var directory before restarting it
connection node_1;
-CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB;
+CREATE TABLE t1 (id int not null primary key,f1 CHAR(255)) ENGINE=InnoDB;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
+INSERT INTO t1 VALUES (1,'node1_committed_before');
+INSERT INTO t1 VALUES (2,'node1_committed_before');
+INSERT INTO t1 VALUES (3,'node1_committed_before');
+INSERT INTO t1 VALUES (4,'node1_committed_before');
+INSERT INTO t1 VALUES (5,'node1_committed_before');
COMMIT;
connection node_2;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
+INSERT INTO t1 VALUES (6,'node2_committed_before');
+INSERT INTO t1 VALUES (7,'node2_committed_before');
+INSERT INTO t1 VALUES (8,'node2_committed_before');
+INSERT INTO t1 VALUES (9,'node2_committed_before');
+INSERT INTO t1 VALUES (10,'node2_committed_before');
COMMIT;
Shutting down server ...
connection node_1;
Cleaning var directory ...
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
+INSERT INTO t1 VALUES (11,'node1_committed_during');
+INSERT INTO t1 VALUES (12,'node1_committed_during');
+INSERT INTO t1 VALUES (13,'node1_committed_during');
+INSERT INTO t1 VALUES (14,'node1_committed_during');
+INSERT INTO t1 VALUES (15,'node1_committed_during');
COMMIT;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (16,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (17,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (18,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (19,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (20,'node1_to_be_committed_after');
connect node_1a_galera_st_clean_slave, 127.0.0.1, root, , test, $NODE_MYPORT_1;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (21,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (22,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (23,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (24,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (25,'node1_to_be_rollbacked_after');
connection node_2;
Starting server ...
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
+INSERT INTO t1 VALUES (26,'node2_committed_after');
+INSERT INTO t1 VALUES (27,'node2_committed_after');
+INSERT INTO t1 VALUES (28,'node2_committed_after');
+INSERT INTO t1 VALUES (29,'node2_committed_after');
+INSERT INTO t1 VALUES (30,'node2_committed_after');
COMMIT;
connection node_1;
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (31,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (32,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (33,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (34,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (35,'node1_to_be_committed_after');
COMMIT;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
+INSERT INTO t1 VALUES (36,'node1_committed_after');
+INSERT INTO t1 VALUES (37,'node1_committed_after');
+INSERT INTO t1 VALUES (38,'node1_committed_after');
+INSERT INTO t1 VALUES (39,'node1_committed_after');
+INSERT INTO t1 VALUES (40,'node1_committed_after');
COMMIT;
connection node_1a_galera_st_clean_slave;
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (41,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (42,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (43,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (44,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (45,'node1_to_be_rollbacked_after');
ROLLBACK;
-SELECT COUNT(*) = 35 FROM t1;
-COUNT(*) = 35
-1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_35 FROM t1;
+EXPECT_35
+35
+SELECT * from t1;
+id f1
+1 node1_committed_before
+2 node1_committed_before
+3 node1_committed_before
+4 node1_committed_before
+5 node1_committed_before
+6 node2_committed_before
+7 node2_committed_before
+8 node2_committed_before
+9 node2_committed_before
+10 node2_committed_before
+11 node1_committed_during
+12 node1_committed_during
+13 node1_committed_during
+14 node1_committed_during
+15 node1_committed_during
+16 node1_to_be_committed_after
+17 node1_to_be_committed_after
+18 node1_to_be_committed_after
+19 node1_to_be_committed_after
+20 node1_to_be_committed_after
+26 node2_committed_after
+27 node2_committed_after
+28 node2_committed_after
+29 node2_committed_after
+30 node2_committed_after
+31 node1_to_be_committed_after
+32 node1_to_be_committed_after
+33 node1_to_be_committed_after
+34 node1_to_be_committed_after
+35 node1_to_be_committed_after
+36 node1_committed_after
+37 node1_committed_after
+38 node1_committed_after
+39 node1_committed_after
+40 node1_committed_after
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
COUNT(*) = 0
1
COMMIT;
-SET AUTOCOMMIT=ON;
connection node_1;
-SELECT COUNT(*) = 35 FROM t1;
-COUNT(*) = 35
-1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_35 FROM t1;
+EXPECT_35
+35
+SELECT * from t1;
+id f1
+1 node1_committed_before
+2 node1_committed_before
+3 node1_committed_before
+4 node1_committed_before
+5 node1_committed_before
+6 node2_committed_before
+7 node2_committed_before
+8 node2_committed_before
+9 node2_committed_before
+10 node2_committed_before
+11 node1_committed_during
+12 node1_committed_during
+13 node1_committed_during
+14 node1_committed_during
+15 node1_committed_during
+16 node1_to_be_committed_after
+17 node1_to_be_committed_after
+18 node1_to_be_committed_after
+19 node1_to_be_committed_after
+20 node1_to_be_committed_after
+26 node2_committed_after
+27 node2_committed_after
+28 node2_committed_after
+29 node2_committed_after
+30 node2_committed_after
+31 node1_to_be_committed_after
+32 node1_to_be_committed_after
+33 node1_to_be_committed_after
+34 node1_to_be_committed_after
+35 node1_to_be_committed_after
+36 node1_committed_after
+37 node1_committed_after
+38 node1_committed_after
+39 node1_committed_after
+40 node1_committed_after
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
COUNT(*) = 0
1
DROP TABLE t1;
COMMIT;
-SET AUTOCOMMIT=ON;
Performing State Transfer on a server that has been killed and restarted
connection node_1;
-CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB;
+CREATE TABLE t1 (id int not null primary key,f1 CHAR(255)) ENGINE=InnoDB;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
+INSERT INTO t1 VALUES (1,'node1_committed_before');
+INSERT INTO t1 VALUES (2,'node1_committed_before');
+INSERT INTO t1 VALUES (3,'node1_committed_before');
+INSERT INTO t1 VALUES (4,'node1_committed_before');
+INSERT INTO t1 VALUES (5,'node1_committed_before');
COMMIT;
connection node_2;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
+INSERT INTO t1 VALUES (6,'node2_committed_before');
+INSERT INTO t1 VALUES (7,'node2_committed_before');
+INSERT INTO t1 VALUES (8,'node2_committed_before');
+INSERT INTO t1 VALUES (9,'node2_committed_before');
+INSERT INTO t1 VALUES (10,'node2_committed_before');
COMMIT;
Killing server ...
connection node_1;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
+INSERT INTO t1 VALUES (11,'node1_committed_during');
+INSERT INTO t1 VALUES (12,'node1_committed_during');
+INSERT INTO t1 VALUES (13,'node1_committed_during');
+INSERT INTO t1 VALUES (14,'node1_committed_during');
+INSERT INTO t1 VALUES (15,'node1_committed_during');
COMMIT;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (16,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (17,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (18,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (19,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (20,'node1_to_be_committed_after');
connect node_1a_galera_st_kill_slave, 127.0.0.1, root, , test, $NODE_MYPORT_1;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (21,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (22,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (23,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (24,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (25,'node1_to_be_rollbacked_after');
connection node_2;
Performing --wsrep-recover ...
Starting server ...
Using --wsrep-start-position when starting mysqld ...
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
+INSERT INTO t1 VALUES (26,'node2_committed_after');
+INSERT INTO t1 VALUES (27,'node2_committed_after');
+INSERT INTO t1 VALUES (28,'node2_committed_after');
+INSERT INTO t1 VALUES (29,'node2_committed_after');
+INSERT INTO t1 VALUES (30,'node2_committed_after');
COMMIT;
connection node_1;
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (31,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (32,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (33,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (34,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (35,'node1_to_be_committed_after');
COMMIT;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
+INSERT INTO t1 VALUES (36,'node1_committed_after');
+INSERT INTO t1 VALUES (37,'node1_committed_after');
+INSERT INTO t1 VALUES (38,'node1_committed_after');
+INSERT INTO t1 VALUES (39,'node1_committed_after');
+INSERT INTO t1 VALUES (40,'node1_committed_after');
COMMIT;
connection node_1a_galera_st_kill_slave;
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (41,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (42,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (43,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (45,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (46,'node1_to_be_rollbacked_after');
ROLLBACK;
-SELECT COUNT(*) = 35 FROM t1;
-COUNT(*) = 35
-1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_35 FROM t1;
+EXPECT_35
+35
+SELECT * FROM t1;
+id f1
+1 node1_committed_before
+2 node1_committed_before
+3 node1_committed_before
+4 node1_committed_before
+5 node1_committed_before
+6 node2_committed_before
+7 node2_committed_before
+8 node2_committed_before
+9 node2_committed_before
+10 node2_committed_before
+11 node1_committed_during
+12 node1_committed_during
+13 node1_committed_during
+14 node1_committed_during
+15 node1_committed_during
+16 node1_to_be_committed_after
+17 node1_to_be_committed_after
+18 node1_to_be_committed_after
+19 node1_to_be_committed_after
+20 node1_to_be_committed_after
+26 node2_committed_after
+27 node2_committed_after
+28 node2_committed_after
+29 node2_committed_after
+30 node2_committed_after
+31 node1_to_be_committed_after
+32 node1_to_be_committed_after
+33 node1_to_be_committed_after
+34 node1_to_be_committed_after
+35 node1_to_be_committed_after
+36 node1_committed_after
+37 node1_committed_after
+38 node1_committed_after
+39 node1_committed_after
+40 node1_committed_after
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
COUNT(*) = 0
1
COMMIT;
-SET AUTOCOMMIT=ON;
connection node_1;
-SELECT COUNT(*) = 35 FROM t1;
-COUNT(*) = 35
-1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_35 FROM t1;
+EXPECT_35
+35
+SELECT * FROM t1;
+id f1
+1 node1_committed_before
+2 node1_committed_before
+3 node1_committed_before
+4 node1_committed_before
+5 node1_committed_before
+6 node2_committed_before
+7 node2_committed_before
+8 node2_committed_before
+9 node2_committed_before
+10 node2_committed_before
+11 node1_committed_during
+12 node1_committed_during
+13 node1_committed_during
+14 node1_committed_during
+15 node1_committed_during
+16 node1_to_be_committed_after
+17 node1_to_be_committed_after
+18 node1_to_be_committed_after
+19 node1_to_be_committed_after
+20 node1_to_be_committed_after
+26 node2_committed_after
+27 node2_committed_after
+28 node2_committed_after
+29 node2_committed_after
+30 node2_committed_after
+31 node1_to_be_committed_after
+32 node1_to_be_committed_after
+33 node1_to_be_committed_after
+34 node1_to_be_committed_after
+35 node1_to_be_committed_after
+36 node1_committed_after
+37 node1_committed_after
+38 node1_committed_after
+39 node1_committed_after
+40 node1_committed_after
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
COUNT(*) = 0
1
DROP TABLE t1;
COMMIT;
-SET AUTOCOMMIT=ON;
diff --git a/mysql-test/suite/galera/r/galera_sst_rsync2,debug.rdiff b/mysql-test/suite/galera/r/galera_sst_rsync2,debug.rdiff
index 8ffe51c0cc3..14f67770572 100644
--- a/mysql-test/suite/galera/r/galera_sst_rsync2,debug.rdiff
+++ b/mysql-test/suite/galera/r/galera_sst_rsync2,debug.rdiff
@@ -1,33 +1,27 @@
---- galera_sst_rsync2.result 2018-11-29 17:57:53.288606346 +0100
-+++ galera_sst_rsync2,debug.reject 2018-11-29 18:00:01.172512000 +0100
-@@ -1,3 +1,5 @@
-+connection node_2;
-+connection node_1;
- connection node_1;
- connection node_2;
- Performing State Transfer on a server that has been shut down cleanly and restarted
-@@ -286,3 +288,111 @@
+--- r/galera_sst_rsync2.result 2021-04-10 14:34:48.646288119 +0300
++++ r/galera_sst_rsync2,debug.reject 2021-04-10 15:04:10.276286996 +0300
+@@ -516,3 +516,187 @@
+ 1
DROP TABLE t1;
COMMIT;
- SET AUTOCOMMIT=ON;
+Performing State Transfer on a server that has been killed and restarted
+while a DDL was in progress on it
+connection node_1;
-+CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB;
++CREATE TABLE t1 (id int not null primary key,f1 CHAR(255)) ENGINE=InnoDB;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
-+INSERT INTO t1 VALUES ('node1_committed_before');
-+INSERT INTO t1 VALUES ('node1_committed_before');
-+INSERT INTO t1 VALUES ('node1_committed_before');
-+INSERT INTO t1 VALUES ('node1_committed_before');
-+INSERT INTO t1 VALUES ('node1_committed_before');
++INSERT INTO t1 VALUES (1,'node1_committed_before');
++INSERT INTO t1 VALUES (2,'node1_committed_before');
++INSERT INTO t1 VALUES (3,'node1_committed_before');
++INSERT INTO t1 VALUES (4,'node1_committed_before');
++INSERT INTO t1 VALUES (5,'node1_committed_before');
+connection node_2;
+START TRANSACTION;
-+INSERT INTO t1 VALUES ('node2_committed_before');
-+INSERT INTO t1 VALUES ('node2_committed_before');
-+INSERT INTO t1 VALUES ('node2_committed_before');
-+INSERT INTO t1 VALUES ('node2_committed_before');
-+INSERT INTO t1 VALUES ('node2_committed_before');
++INSERT INTO t1 VALUES (6,'node2_committed_before');
++INSERT INTO t1 VALUES (7,'node2_committed_before');
++INSERT INTO t1 VALUES (8,'node2_committed_before');
++INSERT INTO t1 VALUES (9,'node2_committed_before');
++INSERT INTO t1 VALUES (10,'node2_committed_before');
+COMMIT;
+SET GLOBAL debug_dbug = 'd,sync.alter_opened_table';
+connection node_1;
@@ -38,26 +32,26 @@
+connection node_1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
-+INSERT INTO t1 (f1) VALUES ('node1_committed_during');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_during');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_during');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_during');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_during');
++INSERT INTO t1 (id,f1) VALUES (11,'node1_committed_during');
++INSERT INTO t1 (id,f1) VALUES (12,'node1_committed_during');
++INSERT INTO t1 (id,f1) VALUES (13,'node1_committed_during');
++INSERT INTO t1 (id,f1) VALUES (14,'node1_committed_during');
++INSERT INTO t1 (id,f1) VALUES (15,'node1_committed_during');
+COMMIT;
+START TRANSACTION;
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (16,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (17,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (18,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (19,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (20,'node1_to_be_committed_after');
+connect node_1a_galera_st_kill_slave_ddl, 127.0.0.1, root, , test, $NODE_MYPORT_1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (21,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (22,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (23,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (24,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (25,'node1_to_be_rollbacked_after');
+connection node_2;
+Performing --wsrep-recover ...
+connection node_2;
@@ -65,56 +59,132 @@
+Using --wsrep-start-position when starting mysqld ...
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
-+INSERT INTO t1 (f1) VALUES ('node2_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node2_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node2_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node2_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node2_committed_after');
++INSERT INTO t1 (id,f1) VALUES (26,'node2_committed_after');
++INSERT INTO t1 (id,f1) VALUES (27,'node2_committed_after');
++INSERT INTO t1 (id,f1) VALUES (28,'node2_committed_after');
++INSERT INTO t1 (id,f1) VALUES (29,'node2_committed_after');
++INSERT INTO t1 (id,f1) VALUES (30,'node2_committed_after');
+COMMIT;
+connection node_1;
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (31,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (32,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (33,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (34,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (35,'node1_to_be_committed_after');
+COMMIT;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
-+INSERT INTO t1 (f1) VALUES ('node1_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_after');
++INSERT INTO t1 (id,f1) VALUES (36,'node1_committed_after');
++INSERT INTO t1 (id,f1) VALUES (37,'node1_committed_after');
++INSERT INTO t1 (id,f1) VALUES (38,'node1_committed_after');
++INSERT INTO t1 (id,f1) VALUES (39,'node1_committed_after');
++INSERT INTO t1 (id,f1) VALUES (40,'node1_committed_after');
+COMMIT;
+connection node_1a_galera_st_kill_slave_ddl;
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (41,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (42,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (43,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (44,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (45,'node1_to_be_rollbacked_after');
+ROLLBACK;
-+SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1';
-+COUNT(*) = 2
-+1
-+SELECT COUNT(*) = 35 FROM t1;
-+COUNT(*) = 35
-+1
++SET AUTOCOMMIT=ON;
++SET SESSION wsrep_sync_wait=15;
++SELECT COUNT(*) AS EXPECT_3 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1';
++EXPECT_3
++3
++SELECT COUNT(*) AS EXPECT_35 FROM t1;
++EXPECT_35
++35
++SELECT * FROM t1;
++id f1 f2
++1 node1_committed_before NULL
++2 node1_committed_before NULL
++3 node1_committed_before NULL
++4 node1_committed_before NULL
++5 node1_committed_before NULL
++6 node2_committed_before NULL
++7 node2_committed_before NULL
++8 node2_committed_before NULL
++9 node2_committed_before NULL
++10 node2_committed_before NULL
++11 node1_committed_during NULL
++12 node1_committed_during NULL
++13 node1_committed_during NULL
++14 node1_committed_during NULL
++15 node1_committed_during NULL
++16 node1_to_be_committed_after NULL
++17 node1_to_be_committed_after NULL
++18 node1_to_be_committed_after NULL
++19 node1_to_be_committed_after NULL
++20 node1_to_be_committed_after NULL
++26 node2_committed_after NULL
++27 node2_committed_after NULL
++28 node2_committed_after NULL
++29 node2_committed_after NULL
++30 node2_committed_after NULL
++31 node1_to_be_committed_after NULL
++32 node1_to_be_committed_after NULL
++33 node1_to_be_committed_after NULL
++34 node1_to_be_committed_after NULL
++35 node1_to_be_committed_after NULL
++36 node1_committed_after NULL
++37 node1_committed_after NULL
++38 node1_committed_after NULL
++39 node1_committed_after NULL
++40 node1_committed_after NULL
+SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
+COUNT(*) = 0
+1
+COMMIT;
-+SET AUTOCOMMIT=ON;
+connection node_1;
-+SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1';
-+COUNT(*) = 2
-+1
-+SELECT COUNT(*) = 35 FROM t1;
-+COUNT(*) = 35
-+1
++SET AUTOCOMMIT=ON;
++SET SESSION wsrep_sync_wait=15;
++SELECT COUNT(*) AS EXPECT_3 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1';
++EXPECT_3
++3
++SELECT COUNT(*) AS EXPECT_35 FROM t1;
++EXPECT_35
++35
++SELECT * FROM t1;
++id f1 f2
++1 node1_committed_before NULL
++2 node1_committed_before NULL
++3 node1_committed_before NULL
++4 node1_committed_before NULL
++5 node1_committed_before NULL
++6 node2_committed_before NULL
++7 node2_committed_before NULL
++8 node2_committed_before NULL
++9 node2_committed_before NULL
++10 node2_committed_before NULL
++11 node1_committed_during NULL
++12 node1_committed_during NULL
++13 node1_committed_during NULL
++14 node1_committed_during NULL
++15 node1_committed_during NULL
++16 node1_to_be_committed_after NULL
++17 node1_to_be_committed_after NULL
++18 node1_to_be_committed_after NULL
++19 node1_to_be_committed_after NULL
++20 node1_to_be_committed_after NULL
++26 node2_committed_after NULL
++27 node2_committed_after NULL
++28 node2_committed_after NULL
++29 node2_committed_after NULL
++30 node2_committed_after NULL
++31 node1_to_be_committed_after NULL
++32 node1_to_be_committed_after NULL
++33 node1_to_be_committed_after NULL
++34 node1_to_be_committed_after NULL
++35 node1_to_be_committed_after NULL
++36 node1_committed_after NULL
++37 node1_committed_after NULL
++38 node1_committed_after NULL
++39 node1_committed_after NULL
++40 node1_committed_after NULL
+SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
+COUNT(*) = 0
+1
+DROP TABLE t1;
+COMMIT;
-+SET AUTOCOMMIT=ON;
+SET GLOBAL debug_dbug = $debug_orig;
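Note on the ",debug" rdiff above: it records the extra scenario that only runs on debug builds, where node_2 is killed and restarted while a DDL on t1 is parked on the sync.alter_opened_table debug sync point. A minimal sketch of that gating, assembled only from statements visible in the hunk plus one hedged assumption: the ALTER itself is outside this excerpt, so the added nullable column is inferred from the "id f1 f2" / EXPECT_3 output and its exact definition is an assumption.

    SET GLOBAL debug_dbug = 'd,sync.alter_opened_table';  # arm the sync point (debug builds only)
    ALTER TABLE t1 ADD COLUMN f2 INT;                     # hypothetical: exact DDL not shown in this hunk;
                                                          # the result header "id f1 f2" and EXPECT_3 column
                                                          # count imply a third, NULL-able column named f2
    # node_2 is killed mid-ALTER, recovered with --wsrep-recover, and restarted; afterwards:
    SET GLOBAL debug_dbug = $debug_orig;                  # restore the original debug settings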
diff --git a/mysql-test/suite/galera/r/galera_sst_rsync2.result b/mysql-test/suite/galera/r/galera_sst_rsync2.result
index d41d0d34e75..8b531bc6a3f 100644
--- a/mysql-test/suite/galera/r/galera_sst_rsync2.result
+++ b/mysql-test/suite/galera/r/galera_sst_rsync2.result
@@ -4,287 +4,515 @@ connection node_1;
connection node_2;
Performing State Transfer on a server that has been shut down cleanly and restarted
connection node_1;
-CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB;
+CREATE TABLE t1 (id int not null primary key,f1 CHAR(255)) ENGINE=InnoDB;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
+INSERT INTO t1 VALUES (1,'node1_committed_before');
+INSERT INTO t1 VALUES (2,'node1_committed_before');
+INSERT INTO t1 VALUES (3,'node1_committed_before');
+INSERT INTO t1 VALUES (4,'node1_committed_before');
+INSERT INTO t1 VALUES (5,'node1_committed_before');
COMMIT;
connection node_2;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
+INSERT INTO t1 VALUES (6,'node2_committed_before');
+INSERT INTO t1 VALUES (7,'node2_committed_before');
+INSERT INTO t1 VALUES (8,'node2_committed_before');
+INSERT INTO t1 VALUES (9,'node2_committed_before');
+INSERT INTO t1 VALUES (10,'node2_committed_before');
COMMIT;
Shutting down server ...
connection node_1;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
+INSERT INTO t1 VALUES (11,'node1_committed_during');
+INSERT INTO t1 VALUES (12,'node1_committed_during');
+INSERT INTO t1 VALUES (13,'node1_committed_during');
+INSERT INTO t1 VALUES (14,'node1_committed_during');
+INSERT INTO t1 VALUES (15,'node1_committed_during');
COMMIT;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (16,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (17,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (18,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (19,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (20,'node1_to_be_committed_after');
connect node_1a_galera_st_shutdown_slave, 127.0.0.1, root, , test, $NODE_MYPORT_1;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (21,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (22,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (23,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (24,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (25,'node1_to_be_rollbacked_after');
connection node_2;
Starting server ...
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
+INSERT INTO t1 VALUES (26,'node2_committed_after');
+INSERT INTO t1 VALUES (27,'node2_committed_after');
+INSERT INTO t1 VALUES (28,'node2_committed_after');
+INSERT INTO t1 VALUES (29,'node2_committed_after');
+INSERT INTO t1 VALUES (30,'node2_committed_after');
COMMIT;
connection node_1;
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (31,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (32,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (33,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (34,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (35,'node1_to_be_committed_after');
COMMIT;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
+INSERT INTO t1 VALUES (36,'node1_committed_after');
+INSERT INTO t1 VALUES (37,'node1_committed_after');
+INSERT INTO t1 VALUES (38,'node1_committed_after');
+INSERT INTO t1 VALUES (39,'node1_committed_after');
+INSERT INTO t1 VALUES (40,'node1_committed_after');
COMMIT;
connection node_1a_galera_st_shutdown_slave;
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (41,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (42,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (43,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (44,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (45,'node1_to_be_rollbacked_after');
ROLLBACK;
-SELECT COUNT(*) = 35 FROM t1;
-COUNT(*) = 35
-1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_15 FROM t1;
+EXPECT_15
+35
+SELECT * from t1;
+id f1
+1 node1_committed_before
+2 node1_committed_before
+3 node1_committed_before
+4 node1_committed_before
+5 node1_committed_before
+6 node2_committed_before
+7 node2_committed_before
+8 node2_committed_before
+9 node2_committed_before
+10 node2_committed_before
+11 node1_committed_during
+12 node1_committed_during
+13 node1_committed_during
+14 node1_committed_during
+15 node1_committed_during
+16 node1_to_be_committed_after
+17 node1_to_be_committed_after
+18 node1_to_be_committed_after
+19 node1_to_be_committed_after
+20 node1_to_be_committed_after
+26 node2_committed_after
+27 node2_committed_after
+28 node2_committed_after
+29 node2_committed_after
+30 node2_committed_after
+31 node1_to_be_committed_after
+32 node1_to_be_committed_after
+33 node1_to_be_committed_after
+34 node1_to_be_committed_after
+35 node1_to_be_committed_after
+36 node1_committed_after
+37 node1_committed_after
+38 node1_committed_after
+39 node1_committed_after
+40 node1_committed_after
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
COUNT(*) = 0
1
COMMIT;
-SET AUTOCOMMIT=ON;
connection node_1;
-SELECT COUNT(*) = 35 FROM t1;
-COUNT(*) = 35
-1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_15 FROM t1;
+EXPECT_15
+35
+SELECT * from t1;
+id f1
+1 node1_committed_before
+2 node1_committed_before
+3 node1_committed_before
+4 node1_committed_before
+5 node1_committed_before
+6 node2_committed_before
+7 node2_committed_before
+8 node2_committed_before
+9 node2_committed_before
+10 node2_committed_before
+11 node1_committed_during
+12 node1_committed_during
+13 node1_committed_during
+14 node1_committed_during
+15 node1_committed_during
+16 node1_to_be_committed_after
+17 node1_to_be_committed_after
+18 node1_to_be_committed_after
+19 node1_to_be_committed_after
+20 node1_to_be_committed_after
+26 node2_committed_after
+27 node2_committed_after
+28 node2_committed_after
+29 node2_committed_after
+30 node2_committed_after
+31 node1_to_be_committed_after
+32 node1_to_be_committed_after
+33 node1_to_be_committed_after
+34 node1_to_be_committed_after
+35 node1_to_be_committed_after
+36 node1_committed_after
+37 node1_committed_after
+38 node1_committed_after
+39 node1_committed_after
+40 node1_committed_after
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
COUNT(*) = 0
1
DROP TABLE t1;
COMMIT;
-SET AUTOCOMMIT=ON;
Performing State Transfer on a server that starts from a clean var directory
This is accomplished by shutting down node #2 and removing its var directory before restarting it
connection node_1;
-CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB;
+CREATE TABLE t1 (id int not null primary key,f1 CHAR(255)) ENGINE=InnoDB;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
+INSERT INTO t1 VALUES (1,'node1_committed_before');
+INSERT INTO t1 VALUES (2,'node1_committed_before');
+INSERT INTO t1 VALUES (3,'node1_committed_before');
+INSERT INTO t1 VALUES (4,'node1_committed_before');
+INSERT INTO t1 VALUES (5,'node1_committed_before');
COMMIT;
connection node_2;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
+INSERT INTO t1 VALUES (6,'node2_committed_before');
+INSERT INTO t1 VALUES (7,'node2_committed_before');
+INSERT INTO t1 VALUES (8,'node2_committed_before');
+INSERT INTO t1 VALUES (9,'node2_committed_before');
+INSERT INTO t1 VALUES (10,'node2_committed_before');
COMMIT;
Shutting down server ...
connection node_1;
Cleaning var directory ...
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
+INSERT INTO t1 VALUES (11,'node1_committed_during');
+INSERT INTO t1 VALUES (12,'node1_committed_during');
+INSERT INTO t1 VALUES (13,'node1_committed_during');
+INSERT INTO t1 VALUES (14,'node1_committed_during');
+INSERT INTO t1 VALUES (15,'node1_committed_during');
COMMIT;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (16,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (17,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (18,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (19,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (20,'node1_to_be_committed_after');
connect node_1a_galera_st_clean_slave, 127.0.0.1, root, , test, $NODE_MYPORT_1;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (21,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (22,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (23,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (24,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (25,'node1_to_be_rollbacked_after');
connection node_2;
Starting server ...
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
+INSERT INTO t1 VALUES (26,'node2_committed_after');
+INSERT INTO t1 VALUES (27,'node2_committed_after');
+INSERT INTO t1 VALUES (28,'node2_committed_after');
+INSERT INTO t1 VALUES (29,'node2_committed_after');
+INSERT INTO t1 VALUES (30,'node2_committed_after');
COMMIT;
connection node_1;
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (31,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (32,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (33,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (34,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (35,'node1_to_be_committed_after');
COMMIT;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
+INSERT INTO t1 VALUES (36,'node1_committed_after');
+INSERT INTO t1 VALUES (37,'node1_committed_after');
+INSERT INTO t1 VALUES (38,'node1_committed_after');
+INSERT INTO t1 VALUES (39,'node1_committed_after');
+INSERT INTO t1 VALUES (40,'node1_committed_after');
COMMIT;
connection node_1a_galera_st_clean_slave;
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (41,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (42,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (43,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (44,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (45,'node1_to_be_rollbacked_after');
ROLLBACK;
-SELECT COUNT(*) = 35 FROM t1;
-COUNT(*) = 35
-1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_35 FROM t1;
+EXPECT_35
+35
+SELECT * from t1;
+id f1
+1 node1_committed_before
+2 node1_committed_before
+3 node1_committed_before
+4 node1_committed_before
+5 node1_committed_before
+6 node2_committed_before
+7 node2_committed_before
+8 node2_committed_before
+9 node2_committed_before
+10 node2_committed_before
+11 node1_committed_during
+12 node1_committed_during
+13 node1_committed_during
+14 node1_committed_during
+15 node1_committed_during
+16 node1_to_be_committed_after
+17 node1_to_be_committed_after
+18 node1_to_be_committed_after
+19 node1_to_be_committed_after
+20 node1_to_be_committed_after
+26 node2_committed_after
+27 node2_committed_after
+28 node2_committed_after
+29 node2_committed_after
+30 node2_committed_after
+31 node1_to_be_committed_after
+32 node1_to_be_committed_after
+33 node1_to_be_committed_after
+34 node1_to_be_committed_after
+35 node1_to_be_committed_after
+36 node1_committed_after
+37 node1_committed_after
+38 node1_committed_after
+39 node1_committed_after
+40 node1_committed_after
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
COUNT(*) = 0
1
COMMIT;
-SET AUTOCOMMIT=ON;
connection node_1;
-SELECT COUNT(*) = 35 FROM t1;
-COUNT(*) = 35
-1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_35 FROM t1;
+EXPECT_35
+35
+SELECT * from t1;
+id f1
+1 node1_committed_before
+2 node1_committed_before
+3 node1_committed_before
+4 node1_committed_before
+5 node1_committed_before
+6 node2_committed_before
+7 node2_committed_before
+8 node2_committed_before
+9 node2_committed_before
+10 node2_committed_before
+11 node1_committed_during
+12 node1_committed_during
+13 node1_committed_during
+14 node1_committed_during
+15 node1_committed_during
+16 node1_to_be_committed_after
+17 node1_to_be_committed_after
+18 node1_to_be_committed_after
+19 node1_to_be_committed_after
+20 node1_to_be_committed_after
+26 node2_committed_after
+27 node2_committed_after
+28 node2_committed_after
+29 node2_committed_after
+30 node2_committed_after
+31 node1_to_be_committed_after
+32 node1_to_be_committed_after
+33 node1_to_be_committed_after
+34 node1_to_be_committed_after
+35 node1_to_be_committed_after
+36 node1_committed_after
+37 node1_committed_after
+38 node1_committed_after
+39 node1_committed_after
+40 node1_committed_after
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
COUNT(*) = 0
1
DROP TABLE t1;
COMMIT;
-SET AUTOCOMMIT=ON;
Performing State Transfer on a server that has been killed and restarted
connection node_1;
-CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB;
+CREATE TABLE t1 (id int not null primary key,f1 CHAR(255)) ENGINE=InnoDB;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
+INSERT INTO t1 VALUES (1,'node1_committed_before');
+INSERT INTO t1 VALUES (2,'node1_committed_before');
+INSERT INTO t1 VALUES (3,'node1_committed_before');
+INSERT INTO t1 VALUES (4,'node1_committed_before');
+INSERT INTO t1 VALUES (5,'node1_committed_before');
COMMIT;
connection node_2;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
+INSERT INTO t1 VALUES (6,'node2_committed_before');
+INSERT INTO t1 VALUES (7,'node2_committed_before');
+INSERT INTO t1 VALUES (8,'node2_committed_before');
+INSERT INTO t1 VALUES (9,'node2_committed_before');
+INSERT INTO t1 VALUES (10,'node2_committed_before');
COMMIT;
Killing server ...
connection node_1;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
+INSERT INTO t1 VALUES (11,'node1_committed_during');
+INSERT INTO t1 VALUES (12,'node1_committed_during');
+INSERT INTO t1 VALUES (13,'node1_committed_during');
+INSERT INTO t1 VALUES (14,'node1_committed_during');
+INSERT INTO t1 VALUES (15,'node1_committed_during');
COMMIT;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (16,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (17,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (18,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (19,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (20,'node1_to_be_committed_after');
connect node_1a_galera_st_kill_slave, 127.0.0.1, root, , test, $NODE_MYPORT_1;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (21,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (22,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (23,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (24,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (25,'node1_to_be_rollbacked_after');
connection node_2;
Performing --wsrep-recover ...
Starting server ...
Using --wsrep-start-position when starting mysqld ...
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
+INSERT INTO t1 VALUES (26,'node2_committed_after');
+INSERT INTO t1 VALUES (27,'node2_committed_after');
+INSERT INTO t1 VALUES (28,'node2_committed_after');
+INSERT INTO t1 VALUES (29,'node2_committed_after');
+INSERT INTO t1 VALUES (30,'node2_committed_after');
COMMIT;
connection node_1;
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (31,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (32,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (33,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (34,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (35,'node1_to_be_committed_after');
COMMIT;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
+INSERT INTO t1 VALUES (36,'node1_committed_after');
+INSERT INTO t1 VALUES (37,'node1_committed_after');
+INSERT INTO t1 VALUES (38,'node1_committed_after');
+INSERT INTO t1 VALUES (39,'node1_committed_after');
+INSERT INTO t1 VALUES (40,'node1_committed_after');
COMMIT;
connection node_1a_galera_st_kill_slave;
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (41,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (42,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (43,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (45,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (46,'node1_to_be_rollbacked_after');
ROLLBACK;
-SELECT COUNT(*) = 35 FROM t1;
-COUNT(*) = 35
-1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_35 FROM t1;
+EXPECT_35
+35
+SELECT * FROM t1;
+id f1
+1 node1_committed_before
+2 node1_committed_before
+3 node1_committed_before
+4 node1_committed_before
+5 node1_committed_before
+6 node2_committed_before
+7 node2_committed_before
+8 node2_committed_before
+9 node2_committed_before
+10 node2_committed_before
+11 node1_committed_during
+12 node1_committed_during
+13 node1_committed_during
+14 node1_committed_during
+15 node1_committed_during
+16 node1_to_be_committed_after
+17 node1_to_be_committed_after
+18 node1_to_be_committed_after
+19 node1_to_be_committed_after
+20 node1_to_be_committed_after
+26 node2_committed_after
+27 node2_committed_after
+28 node2_committed_after
+29 node2_committed_after
+30 node2_committed_after
+31 node1_to_be_committed_after
+32 node1_to_be_committed_after
+33 node1_to_be_committed_after
+34 node1_to_be_committed_after
+35 node1_to_be_committed_after
+36 node1_committed_after
+37 node1_committed_after
+38 node1_committed_after
+39 node1_committed_after
+40 node1_committed_after
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
COUNT(*) = 0
1
COMMIT;
-SET AUTOCOMMIT=ON;
connection node_1;
-SELECT COUNT(*) = 35 FROM t1;
-COUNT(*) = 35
-1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_35 FROM t1;
+EXPECT_35
+35
+SELECT * FROM t1;
+id f1
+1 node1_committed_before
+2 node1_committed_before
+3 node1_committed_before
+4 node1_committed_before
+5 node1_committed_before
+6 node2_committed_before
+7 node2_committed_before
+8 node2_committed_before
+9 node2_committed_before
+10 node2_committed_before
+11 node1_committed_during
+12 node1_committed_during
+13 node1_committed_during
+14 node1_committed_during
+15 node1_committed_during
+16 node1_to_be_committed_after
+17 node1_to_be_committed_after
+18 node1_to_be_committed_after
+19 node1_to_be_committed_after
+20 node1_to_be_committed_after
+26 node2_committed_after
+27 node2_committed_after
+28 node2_committed_after
+29 node2_committed_after
+30 node2_committed_after
+31 node1_to_be_committed_after
+32 node1_to_be_committed_after
+33 node1_to_be_committed_after
+34 node1_to_be_committed_after
+35 node1_to_be_committed_after
+36 node1_committed_after
+37 node1_committed_after
+38 node1_committed_after
+39 node1_committed_after
+40 node1_committed_after
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
COUNT(*) = 0
1
DROP TABLE t1;
COMMIT;
-SET AUTOCOMMIT=ON;
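The rewritten galera_sst_rsync2.result above applies the same post-transfer verification on both nodes. A condensed sketch of that check, taken from the statements already present in the hunks; the only explanatory material here is the comments (wsrep_sync_wait=15 is the bitmask covering reads, updates/deletes, inserts/replaces and SHOW, so the SELECTs wait for full causality before verifying):

    SET AUTOCOMMIT=ON;
    SET SESSION wsrep_sync_wait=15;        # causality wait for all statement classes
    SELECT COUNT(*) AS EXPECT_35 FROM t1;  # 45 rows inserted, ids 21-25 and 41-45 rolled back => 35
    SELECT * FROM t1;                      # deterministic primary keys 1-20 and 26-40
    SELECT COUNT(*) = 0
    FROM (SELECT COUNT(*) AS c, f1 FROM t1
          GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;  # each f1 label occurs exactly 5 or 10 times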
diff --git a/mysql-test/suite/galera/r/galera_sst_rsync_data_dir,debug.rdiff b/mysql-test/suite/galera/r/galera_sst_rsync_data_dir,debug.rdiff
index e307a2ff0f9..00b42d53b51 100644
--- a/mysql-test/suite/galera/r/galera_sst_rsync_data_dir,debug.rdiff
+++ b/mysql-test/suite/galera/r/galera_sst_rsync_data_dir,debug.rdiff
@@ -1,27 +1,27 @@
---- suite/galera/r/galera_sst_rsync_data_dir.result 2018-09-13 14:52:50.848220719 +0200
-+++ suite/galera/r/galera_sst_rsync_data_dir.reject 2018-09-13 15:03:32.339135247 +0200
-@@ -286,3 +286,111 @@
+--- r/galera_sst_rsync_data_dir.result 2021-04-10 14:35:28.090610315 +0300
++++ r/galera_sst_rsync_data_dir,debug.reject 2021-04-10 15:50:26.945234998 +0300
+@@ -516,3 +516,187 @@
+ 1
DROP TABLE t1;
COMMIT;
- SET AUTOCOMMIT=ON;
+Performing State Transfer on a server that has been killed and restarted
+while a DDL was in progress on it
+connection node_1;
-+CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB;
++CREATE TABLE t1 (id int not null primary key,f1 CHAR(255)) ENGINE=InnoDB;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
-+INSERT INTO t1 VALUES ('node1_committed_before');
-+INSERT INTO t1 VALUES ('node1_committed_before');
-+INSERT INTO t1 VALUES ('node1_committed_before');
-+INSERT INTO t1 VALUES ('node1_committed_before');
-+INSERT INTO t1 VALUES ('node1_committed_before');
++INSERT INTO t1 VALUES (1,'node1_committed_before');
++INSERT INTO t1 VALUES (2,'node1_committed_before');
++INSERT INTO t1 VALUES (3,'node1_committed_before');
++INSERT INTO t1 VALUES (4,'node1_committed_before');
++INSERT INTO t1 VALUES (5,'node1_committed_before');
+connection node_2;
+START TRANSACTION;
-+INSERT INTO t1 VALUES ('node2_committed_before');
-+INSERT INTO t1 VALUES ('node2_committed_before');
-+INSERT INTO t1 VALUES ('node2_committed_before');
-+INSERT INTO t1 VALUES ('node2_committed_before');
-+INSERT INTO t1 VALUES ('node2_committed_before');
++INSERT INTO t1 VALUES (6,'node2_committed_before');
++INSERT INTO t1 VALUES (7,'node2_committed_before');
++INSERT INTO t1 VALUES (8,'node2_committed_before');
++INSERT INTO t1 VALUES (9,'node2_committed_before');
++INSERT INTO t1 VALUES (10,'node2_committed_before');
+COMMIT;
+SET GLOBAL debug_dbug = 'd,sync.alter_opened_table';
+connection node_1;
@@ -32,26 +32,26 @@
+connection node_1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
-+INSERT INTO t1 (f1) VALUES ('node1_committed_during');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_during');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_during');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_during');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_during');
++INSERT INTO t1 (id,f1) VALUES (11,'node1_committed_during');
++INSERT INTO t1 (id,f1) VALUES (12,'node1_committed_during');
++INSERT INTO t1 (id,f1) VALUES (13,'node1_committed_during');
++INSERT INTO t1 (id,f1) VALUES (14,'node1_committed_during');
++INSERT INTO t1 (id,f1) VALUES (15,'node1_committed_during');
+COMMIT;
+START TRANSACTION;
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (16,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (17,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (18,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (19,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (20,'node1_to_be_committed_after');
+connect node_1a_galera_st_kill_slave_ddl, 127.0.0.1, root, , test, $NODE_MYPORT_1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (21,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (22,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (23,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (24,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (25,'node1_to_be_rollbacked_after');
+connection node_2;
+Performing --wsrep-recover ...
+connection node_2;
@@ -59,56 +59,132 @@
+Using --wsrep-start-position when starting mysqld ...
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
-+INSERT INTO t1 (f1) VALUES ('node2_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node2_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node2_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node2_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node2_committed_after');
++INSERT INTO t1 (id,f1) VALUES (26,'node2_committed_after');
++INSERT INTO t1 (id,f1) VALUES (27,'node2_committed_after');
++INSERT INTO t1 (id,f1) VALUES (28,'node2_committed_after');
++INSERT INTO t1 (id,f1) VALUES (29,'node2_committed_after');
++INSERT INTO t1 (id,f1) VALUES (30,'node2_committed_after');
+COMMIT;
+connection node_1;
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (31,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (32,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (33,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (34,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (35,'node1_to_be_committed_after');
+COMMIT;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
-+INSERT INTO t1 (f1) VALUES ('node1_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_after');
++INSERT INTO t1 (id,f1) VALUES (36,'node1_committed_after');
++INSERT INTO t1 (id,f1) VALUES (37,'node1_committed_after');
++INSERT INTO t1 (id,f1) VALUES (38,'node1_committed_after');
++INSERT INTO t1 (id,f1) VALUES (39,'node1_committed_after');
++INSERT INTO t1 (id,f1) VALUES (40,'node1_committed_after');
+COMMIT;
+connection node_1a_galera_st_kill_slave_ddl;
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (41,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (42,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (43,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (44,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (45,'node1_to_be_rollbacked_after');
+ROLLBACK;
-+SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1';
-+COUNT(*) = 2
-+1
-+SELECT COUNT(*) = 35 FROM t1;
-+COUNT(*) = 35
-+1
++SET AUTOCOMMIT=ON;
++SET SESSION wsrep_sync_wait=15;
++SELECT COUNT(*) AS EXPECT_3 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1';
++EXPECT_3
++3
++SELECT COUNT(*) AS EXPECT_35 FROM t1;
++EXPECT_35
++35
++SELECT * FROM t1;
++id f1 f2
++1 node1_committed_before NULL
++2 node1_committed_before NULL
++3 node1_committed_before NULL
++4 node1_committed_before NULL
++5 node1_committed_before NULL
++6 node2_committed_before NULL
++7 node2_committed_before NULL
++8 node2_committed_before NULL
++9 node2_committed_before NULL
++10 node2_committed_before NULL
++11 node1_committed_during NULL
++12 node1_committed_during NULL
++13 node1_committed_during NULL
++14 node1_committed_during NULL
++15 node1_committed_during NULL
++16 node1_to_be_committed_after NULL
++17 node1_to_be_committed_after NULL
++18 node1_to_be_committed_after NULL
++19 node1_to_be_committed_after NULL
++20 node1_to_be_committed_after NULL
++26 node2_committed_after NULL
++27 node2_committed_after NULL
++28 node2_committed_after NULL
++29 node2_committed_after NULL
++30 node2_committed_after NULL
++31 node1_to_be_committed_after NULL
++32 node1_to_be_committed_after NULL
++33 node1_to_be_committed_after NULL
++34 node1_to_be_committed_after NULL
++35 node1_to_be_committed_after NULL
++36 node1_committed_after NULL
++37 node1_committed_after NULL
++38 node1_committed_after NULL
++39 node1_committed_after NULL
++40 node1_committed_after NULL
+SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
+COUNT(*) = 0
+1
+COMMIT;
-+SET AUTOCOMMIT=ON;
+connection node_1;
-+SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1';
-+COUNT(*) = 2
-+1
-+SELECT COUNT(*) = 35 FROM t1;
-+COUNT(*) = 35
-+1
++SET AUTOCOMMIT=ON;
++SET SESSION wsrep_sync_wait=15;
++SELECT COUNT(*) AS EXPECT_3 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1';
++EXPECT_3
++3
++SELECT COUNT(*) AS EXPECT_35 FROM t1;
++EXPECT_35
++35
++SELECT * FROM t1;
++id f1 f2
++1 node1_committed_before NULL
++2 node1_committed_before NULL
++3 node1_committed_before NULL
++4 node1_committed_before NULL
++5 node1_committed_before NULL
++6 node2_committed_before NULL
++7 node2_committed_before NULL
++8 node2_committed_before NULL
++9 node2_committed_before NULL
++10 node2_committed_before NULL
++11 node1_committed_during NULL
++12 node1_committed_during NULL
++13 node1_committed_during NULL
++14 node1_committed_during NULL
++15 node1_committed_during NULL
++16 node1_to_be_committed_after NULL
++17 node1_to_be_committed_after NULL
++18 node1_to_be_committed_after NULL
++19 node1_to_be_committed_after NULL
++20 node1_to_be_committed_after NULL
++26 node2_committed_after NULL
++27 node2_committed_after NULL
++28 node2_committed_after NULL
++29 node2_committed_after NULL
++30 node2_committed_after NULL
++31 node1_to_be_committed_after NULL
++32 node1_to_be_committed_after NULL
++33 node1_to_be_committed_after NULL
++34 node1_to_be_committed_after NULL
++35 node1_to_be_committed_after NULL
++36 node1_committed_after NULL
++37 node1_committed_after NULL
++38 node1_committed_after NULL
++39 node1_committed_after NULL
++40 node1_committed_after NULL
+SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
+COUNT(*) = 0
+1
+DROP TABLE t1;
+COMMIT;
-+SET AUTOCOMMIT=ON;
+SET GLOBAL debug_dbug = $debug_orig;
diff --git a/mysql-test/suite/galera/r/galera_sst_rsync_data_dir.result b/mysql-test/suite/galera/r/galera_sst_rsync_data_dir.result
index d41d0d34e75..8b531bc6a3f 100644
--- a/mysql-test/suite/galera/r/galera_sst_rsync_data_dir.result
+++ b/mysql-test/suite/galera/r/galera_sst_rsync_data_dir.result
@@ -4,287 +4,515 @@ connection node_1;
connection node_2;
Performing State Transfer on a server that has been shut down cleanly and restarted
connection node_1;
-CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB;
+CREATE TABLE t1 (id int not null primary key,f1 CHAR(255)) ENGINE=InnoDB;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
+INSERT INTO t1 VALUES (1,'node1_committed_before');
+INSERT INTO t1 VALUES (2,'node1_committed_before');
+INSERT INTO t1 VALUES (3,'node1_committed_before');
+INSERT INTO t1 VALUES (4,'node1_committed_before');
+INSERT INTO t1 VALUES (5,'node1_committed_before');
COMMIT;
connection node_2;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
+INSERT INTO t1 VALUES (6,'node2_committed_before');
+INSERT INTO t1 VALUES (7,'node2_committed_before');
+INSERT INTO t1 VALUES (8,'node2_committed_before');
+INSERT INTO t1 VALUES (9,'node2_committed_before');
+INSERT INTO t1 VALUES (10,'node2_committed_before');
COMMIT;
Shutting down server ...
connection node_1;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
+INSERT INTO t1 VALUES (11,'node1_committed_during');
+INSERT INTO t1 VALUES (12,'node1_committed_during');
+INSERT INTO t1 VALUES (13,'node1_committed_during');
+INSERT INTO t1 VALUES (14,'node1_committed_during');
+INSERT INTO t1 VALUES (15,'node1_committed_during');
COMMIT;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (16,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (17,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (18,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (19,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (20,'node1_to_be_committed_after');
connect node_1a_galera_st_shutdown_slave, 127.0.0.1, root, , test, $NODE_MYPORT_1;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (21,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (22,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (23,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (24,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (25,'node1_to_be_rollbacked_after');
connection node_2;
Starting server ...
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
+INSERT INTO t1 VALUES (26,'node2_committed_after');
+INSERT INTO t1 VALUES (27,'node2_committed_after');
+INSERT INTO t1 VALUES (28,'node2_committed_after');
+INSERT INTO t1 VALUES (29,'node2_committed_after');
+INSERT INTO t1 VALUES (30,'node2_committed_after');
COMMIT;
connection node_1;
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (31,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (32,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (33,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (34,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (35,'node1_to_be_committed_after');
COMMIT;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
+INSERT INTO t1 VALUES (36,'node1_committed_after');
+INSERT INTO t1 VALUES (37,'node1_committed_after');
+INSERT INTO t1 VALUES (38,'node1_committed_after');
+INSERT INTO t1 VALUES (39,'node1_committed_after');
+INSERT INTO t1 VALUES (40,'node1_committed_after');
COMMIT;
connection node_1a_galera_st_shutdown_slave;
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (41,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (42,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (43,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (44,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (45,'node1_to_be_rollbacked_after');
ROLLBACK;
-SELECT COUNT(*) = 35 FROM t1;
-COUNT(*) = 35
-1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_15 FROM t1;
+EXPECT_15
+35
+SELECT * from t1;
+id f1
+1 node1_committed_before
+2 node1_committed_before
+3 node1_committed_before
+4 node1_committed_before
+5 node1_committed_before
+6 node2_committed_before
+7 node2_committed_before
+8 node2_committed_before
+9 node2_committed_before
+10 node2_committed_before
+11 node1_committed_during
+12 node1_committed_during
+13 node1_committed_during
+14 node1_committed_during
+15 node1_committed_during
+16 node1_to_be_committed_after
+17 node1_to_be_committed_after
+18 node1_to_be_committed_after
+19 node1_to_be_committed_after
+20 node1_to_be_committed_after
+26 node2_committed_after
+27 node2_committed_after
+28 node2_committed_after
+29 node2_committed_after
+30 node2_committed_after
+31 node1_to_be_committed_after
+32 node1_to_be_committed_after
+33 node1_to_be_committed_after
+34 node1_to_be_committed_after
+35 node1_to_be_committed_after
+36 node1_committed_after
+37 node1_committed_after
+38 node1_committed_after
+39 node1_committed_after
+40 node1_committed_after
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
COUNT(*) = 0
1
COMMIT;
-SET AUTOCOMMIT=ON;
connection node_1;
-SELECT COUNT(*) = 35 FROM t1;
-COUNT(*) = 35
-1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_15 FROM t1;
+EXPECT_15
+35
+SELECT * from t1;
+id f1
+1 node1_committed_before
+2 node1_committed_before
+3 node1_committed_before
+4 node1_committed_before
+5 node1_committed_before
+6 node2_committed_before
+7 node2_committed_before
+8 node2_committed_before
+9 node2_committed_before
+10 node2_committed_before
+11 node1_committed_during
+12 node1_committed_during
+13 node1_committed_during
+14 node1_committed_during
+15 node1_committed_during
+16 node1_to_be_committed_after
+17 node1_to_be_committed_after
+18 node1_to_be_committed_after
+19 node1_to_be_committed_after
+20 node1_to_be_committed_after
+26 node2_committed_after
+27 node2_committed_after
+28 node2_committed_after
+29 node2_committed_after
+30 node2_committed_after
+31 node1_to_be_committed_after
+32 node1_to_be_committed_after
+33 node1_to_be_committed_after
+34 node1_to_be_committed_after
+35 node1_to_be_committed_after
+36 node1_committed_after
+37 node1_committed_after
+38 node1_committed_after
+39 node1_committed_after
+40 node1_committed_after
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
COUNT(*) = 0
1
DROP TABLE t1;
COMMIT;
-SET AUTOCOMMIT=ON;
Performing State Transfer on a server that starts from a clean var directory
This is accomplished by shutting down node #2 and removing its var directory before restarting it
connection node_1;
-CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB;
+CREATE TABLE t1 (id int not null primary key,f1 CHAR(255)) ENGINE=InnoDB;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
+INSERT INTO t1 VALUES (1,'node1_committed_before');
+INSERT INTO t1 VALUES (2,'node1_committed_before');
+INSERT INTO t1 VALUES (3,'node1_committed_before');
+INSERT INTO t1 VALUES (4,'node1_committed_before');
+INSERT INTO t1 VALUES (5,'node1_committed_before');
COMMIT;
connection node_2;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
+INSERT INTO t1 VALUES (6,'node2_committed_before');
+INSERT INTO t1 VALUES (7,'node2_committed_before');
+INSERT INTO t1 VALUES (8,'node2_committed_before');
+INSERT INTO t1 VALUES (9,'node2_committed_before');
+INSERT INTO t1 VALUES (10,'node2_committed_before');
COMMIT;
Shutting down server ...
connection node_1;
Cleaning var directory ...
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
+INSERT INTO t1 VALUES (11,'node1_committed_during');
+INSERT INTO t1 VALUES (12,'node1_committed_during');
+INSERT INTO t1 VALUES (13,'node1_committed_during');
+INSERT INTO t1 VALUES (14,'node1_committed_during');
+INSERT INTO t1 VALUES (15,'node1_committed_during');
COMMIT;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (16,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (17,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (18,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (19,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (20,'node1_to_be_committed_after');
connect node_1a_galera_st_clean_slave, 127.0.0.1, root, , test, $NODE_MYPORT_1;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (21,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (22,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (23,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (24,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (25,'node1_to_be_rollbacked_after');
connection node_2;
Starting server ...
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
+INSERT INTO t1 VALUES (26,'node2_committed_after');
+INSERT INTO t1 VALUES (27,'node2_committed_after');
+INSERT INTO t1 VALUES (28,'node2_committed_after');
+INSERT INTO t1 VALUES (29,'node2_committed_after');
+INSERT INTO t1 VALUES (30,'node2_committed_after');
COMMIT;
connection node_1;
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (31,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (32,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (33,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (34,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (35,'node1_to_be_committed_after');
COMMIT;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
+INSERT INTO t1 VALUES (36,'node1_committed_after');
+INSERT INTO t1 VALUES (37,'node1_committed_after');
+INSERT INTO t1 VALUES (38,'node1_committed_after');
+INSERT INTO t1 VALUES (39,'node1_committed_after');
+INSERT INTO t1 VALUES (40,'node1_committed_after');
COMMIT;
connection node_1a_galera_st_clean_slave;
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (41,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (42,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (43,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (44,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (45,'node1_to_be_rollbacked_after');
ROLLBACK;
-SELECT COUNT(*) = 35 FROM t1;
-COUNT(*) = 35
-1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_35 FROM t1;
+EXPECT_35
+35
+SELECT * from t1;
+id f1
+1 node1_committed_before
+2 node1_committed_before
+3 node1_committed_before
+4 node1_committed_before
+5 node1_committed_before
+6 node2_committed_before
+7 node2_committed_before
+8 node2_committed_before
+9 node2_committed_before
+10 node2_committed_before
+11 node1_committed_during
+12 node1_committed_during
+13 node1_committed_during
+14 node1_committed_during
+15 node1_committed_during
+16 node1_to_be_committed_after
+17 node1_to_be_committed_after
+18 node1_to_be_committed_after
+19 node1_to_be_committed_after
+20 node1_to_be_committed_after
+26 node2_committed_after
+27 node2_committed_after
+28 node2_committed_after
+29 node2_committed_after
+30 node2_committed_after
+31 node1_to_be_committed_after
+32 node1_to_be_committed_after
+33 node1_to_be_committed_after
+34 node1_to_be_committed_after
+35 node1_to_be_committed_after
+36 node1_committed_after
+37 node1_committed_after
+38 node1_committed_after
+39 node1_committed_after
+40 node1_committed_after
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
COUNT(*) = 0
1
COMMIT;
-SET AUTOCOMMIT=ON;
connection node_1;
-SELECT COUNT(*) = 35 FROM t1;
-COUNT(*) = 35
-1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_35 FROM t1;
+EXPECT_35
+35
+SELECT * from t1;
+id f1
+1 node1_committed_before
+2 node1_committed_before
+3 node1_committed_before
+4 node1_committed_before
+5 node1_committed_before
+6 node2_committed_before
+7 node2_committed_before
+8 node2_committed_before
+9 node2_committed_before
+10 node2_committed_before
+11 node1_committed_during
+12 node1_committed_during
+13 node1_committed_during
+14 node1_committed_during
+15 node1_committed_during
+16 node1_to_be_committed_after
+17 node1_to_be_committed_after
+18 node1_to_be_committed_after
+19 node1_to_be_committed_after
+20 node1_to_be_committed_after
+26 node2_committed_after
+27 node2_committed_after
+28 node2_committed_after
+29 node2_committed_after
+30 node2_committed_after
+31 node1_to_be_committed_after
+32 node1_to_be_committed_after
+33 node1_to_be_committed_after
+34 node1_to_be_committed_after
+35 node1_to_be_committed_after
+36 node1_committed_after
+37 node1_committed_after
+38 node1_committed_after
+39 node1_committed_after
+40 node1_committed_after
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
COUNT(*) = 0
1
DROP TABLE t1;
COMMIT;
-SET AUTOCOMMIT=ON;
Performing State Transfer on a server that has been killed and restarted
connection node_1;
-CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB;
+CREATE TABLE t1 (id int not null primary key,f1 CHAR(255)) ENGINE=InnoDB;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
+INSERT INTO t1 VALUES (1,'node1_committed_before');
+INSERT INTO t1 VALUES (2,'node1_committed_before');
+INSERT INTO t1 VALUES (3,'node1_committed_before');
+INSERT INTO t1 VALUES (4,'node1_committed_before');
+INSERT INTO t1 VALUES (5,'node1_committed_before');
COMMIT;
connection node_2;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
+INSERT INTO t1 VALUES (6,'node2_committed_before');
+INSERT INTO t1 VALUES (7,'node2_committed_before');
+INSERT INTO t1 VALUES (8,'node2_committed_before');
+INSERT INTO t1 VALUES (9,'node2_committed_before');
+INSERT INTO t1 VALUES (10,'node2_committed_before');
COMMIT;
Killing server ...
connection node_1;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
+INSERT INTO t1 VALUES (11,'node1_committed_during');
+INSERT INTO t1 VALUES (12,'node1_committed_during');
+INSERT INTO t1 VALUES (13,'node1_committed_during');
+INSERT INTO t1 VALUES (14,'node1_committed_during');
+INSERT INTO t1 VALUES (15,'node1_committed_during');
COMMIT;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (16,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (17,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (18,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (19,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (20,'node1_to_be_committed_after');
connect node_1a_galera_st_kill_slave, 127.0.0.1, root, , test, $NODE_MYPORT_1;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (21,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (22,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (23,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (24,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (25,'node1_to_be_rollbacked_after');
connection node_2;
Performing --wsrep-recover ...
Starting server ...
Using --wsrep-start-position when starting mysqld ...
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
+INSERT INTO t1 VALUES (26,'node2_committed_after');
+INSERT INTO t1 VALUES (27,'node2_committed_after');
+INSERT INTO t1 VALUES (28,'node2_committed_after');
+INSERT INTO t1 VALUES (29,'node2_committed_after');
+INSERT INTO t1 VALUES (30,'node2_committed_after');
COMMIT;
connection node_1;
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (31,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (32,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (33,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (34,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (35,'node1_to_be_committed_after');
COMMIT;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
+INSERT INTO t1 VALUES (36,'node1_committed_after');
+INSERT INTO t1 VALUES (37,'node1_committed_after');
+INSERT INTO t1 VALUES (38,'node1_committed_after');
+INSERT INTO t1 VALUES (39,'node1_committed_after');
+INSERT INTO t1 VALUES (40,'node1_committed_after');
COMMIT;
connection node_1a_galera_st_kill_slave;
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (41,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (42,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (43,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (44,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (45,'node1_to_be_rollbacked_after');
ROLLBACK;
-SELECT COUNT(*) = 35 FROM t1;
-COUNT(*) = 35
-1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_35 FROM t1;
+EXPECT_35
+35
+SELECT * FROM t1;
+id f1
+1 node1_committed_before
+2 node1_committed_before
+3 node1_committed_before
+4 node1_committed_before
+5 node1_committed_before
+6 node2_committed_before
+7 node2_committed_before
+8 node2_committed_before
+9 node2_committed_before
+10 node2_committed_before
+11 node1_committed_during
+12 node1_committed_during
+13 node1_committed_during
+14 node1_committed_during
+15 node1_committed_during
+16 node1_to_be_committed_after
+17 node1_to_be_committed_after
+18 node1_to_be_committed_after
+19 node1_to_be_committed_after
+20 node1_to_be_committed_after
+26 node2_committed_after
+27 node2_committed_after
+28 node2_committed_after
+29 node2_committed_after
+30 node2_committed_after
+31 node1_to_be_committed_after
+32 node1_to_be_committed_after
+33 node1_to_be_committed_after
+34 node1_to_be_committed_after
+35 node1_to_be_committed_after
+36 node1_committed_after
+37 node1_committed_after
+38 node1_committed_after
+39 node1_committed_after
+40 node1_committed_after
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
COUNT(*) = 0
1
COMMIT;
-SET AUTOCOMMIT=ON;
connection node_1;
-SELECT COUNT(*) = 35 FROM t1;
-COUNT(*) = 35
-1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_35 FROM t1;
+EXPECT_35
+35
+SELECT * FROM t1;
+id f1
+1 node1_committed_before
+2 node1_committed_before
+3 node1_committed_before
+4 node1_committed_before
+5 node1_committed_before
+6 node2_committed_before
+7 node2_committed_before
+8 node2_committed_before
+9 node2_committed_before
+10 node2_committed_before
+11 node1_committed_during
+12 node1_committed_during
+13 node1_committed_during
+14 node1_committed_during
+15 node1_committed_during
+16 node1_to_be_committed_after
+17 node1_to_be_committed_after
+18 node1_to_be_committed_after
+19 node1_to_be_committed_after
+20 node1_to_be_committed_after
+26 node2_committed_after
+27 node2_committed_after
+28 node2_committed_after
+29 node2_committed_after
+30 node2_committed_after
+31 node1_to_be_committed_after
+32 node1_to_be_committed_after
+33 node1_to_be_committed_after
+34 node1_to_be_committed_after
+35 node1_to_be_committed_after
+36 node1_committed_after
+37 node1_committed_after
+38 node1_committed_after
+39 node1_committed_after
+40 node1_committed_after
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
COUNT(*) = 0
1
DROP TABLE t1;
COMMIT;
-SET AUTOCOMMIT=ON;
diff --git a/mysql-test/suite/galera/r/galera_toi_lock_shared.result b/mysql-test/suite/galera/r/galera_toi_lock_shared.result
index fe1c88075d5..ec54d1019e9 100644
--- a/mysql-test/suite/galera/r/galera_toi_lock_shared.result
+++ b/mysql-test/suite/galera/r/galera_toi_lock_shared.result
@@ -6,12 +6,21 @@ connection node_2;
ALTER TABLE t1 ADD COLUMN f2 INTEGER, LOCK=SHARED;
connection node_1;
INSERT INTO t1 VALUES (2, 2);
-SELECT COUNT(*) = 2 FROM t1;
-COUNT(*) = 2
-1
+SELECT COUNT(*) AS EXPECT_2 FROM t1;
+EXPECT_2
+2
+SELECT * FROM t1;
+id f2
+1 NULL
+2 2
connection node_2;
INSERT INTO t1 VALUES (3, 3);
-SELECT COUNT(*) = 3 FROM t1;
-COUNT(*) = 3
-1
+SELECT COUNT(*) AS EXPECT_3 FROM t1;
+EXPECT_3
+3
+SELECT * FROM t1;
+id f2
+1 NULL
+2 2
+3 3
DROP TABLE t1;
diff --git a/mysql-test/suite/galera/r/galera_truncate.result b/mysql-test/suite/galera/r/galera_truncate.result
index c649d9bbaf9..c9a4bc854f8 100644
--- a/mysql-test/suite/galera/r/galera_truncate.result
+++ b/mysql-test/suite/galera/r/galera_truncate.result
@@ -32,6 +32,17 @@ SELECT AUTO_INCREMENT = 1 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME IN ('t
AUTO_INCREMENT = 1
1
1
+connection node_1;
+TRUNCATE TABLE mysql.user;
+ERROR 42S02: Table 'mysql.user' doesn't exist
+TRUNCATE TABLE performance_schema.threads;
+ERROR HY000: Invalid performance_schema usage
+TRUNCATE TABLE information_schema.tables;
+ERROR 42000: Access denied for user 'root'@'localhost' to database 'information_schema'
+TRUNCATE TABLE mysql.innodb_index_stats;
+TRUNCATE TABLE foo.bar;
+ERROR 42S02: Table 'foo.bar' doesn't exist
+TRUNCATE TABLE t1;
DROP TABLE t1;
DROP TABLE t2;
DROP TABLE t3;
diff --git a/mysql-test/suite/galera/r/galera_var_sst_auth.result b/mysql-test/suite/galera/r/galera_var_sst_auth.result
index 6a5683e2633..98d683c3b2d 100644
--- a/mysql-test/suite/galera/r/galera_var_sst_auth.result
+++ b/mysql-test/suite/galera/r/galera_var_sst_auth.result
@@ -1,8 +1,7 @@
connection node_2;
connection node_1;
-#
-# MDEV-10492: Assertion failure on shutdown when wsrep_sst_auth set in config
-#
+connection node_1;
+connection node_2;
SELECT @@global.wsrep_sst_auth;
@@global.wsrep_sst_auth
********
@@ -10,5 +9,14 @@ SET @@global.wsrep_sst_auth='foo:bar';
SELECT @@global.wsrep_sst_auth;
@@global.wsrep_sst_auth
********
-disconnect node_2;
-disconnect node_1;
+connection node_2;
+SET @@global.wsrep_sst_auth= 'abcdefghijklmnopqrstuvwxyz';
+SELECT @@global.wsrep_sst_auth;
+@@global.wsrep_sst_auth
+********
+Shutdown node_2
+connection node_1;
+connection node_2;
+SELECT @@global.wsrep_sst_auth;
+@@global.wsrep_sst_auth
+********
diff --git a/mysql-test/suite/galera/r/galera_var_wsrep_on_off.result b/mysql-test/suite/galera/r/galera_var_wsrep_on_off.result
index 5323bc9bf60..88cc444106b 100644
--- a/mysql-test/suite/galera/r/galera_var_wsrep_on_off.result
+++ b/mysql-test/suite/galera/r/galera_var_wsrep_on_off.result
@@ -22,3 +22,106 @@ SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 3;
COUNT(*) = 1
1
DROP TABLE t1;
+connection node_1;
+START TRANSACTION;
+SET SESSION wsrep_on=OFF;
+ERROR 25000: You are not allowed to execute this command in a transaction
+SET GLOBAL wsrep_on=OFF;
+ERROR 25000: You are not allowed to execute this command in a transaction
+COMMIT;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY);
+START TRANSACTION;
+INSERT INTO t1 VALUES (1);
+connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1;;
+connection node_1a;
+SET GLOBAL wsrep_on = OFF;
+connection node_1;
+SHOW SESSION VARIABLES LIKE 'wsrep_on';
+Variable_name Value
+wsrep_on ON
+SHOW GLOBAL VARIABLES LIKE 'wsrep_on';
+Variable_name Value
+wsrep_on OFF
+INSERT INTO t1 VALUES (2);
+COMMIT;
+connection node_2;
+SET SESSION wsrep_sync_wait = 15;
+SELECT * FROM t1;
+f1
+1
+2
+connection node_1a;
+SET GLOBAL wsrep_on = ON;
+DROP TABLE t1;
+connection node_1;
+SET GLOBAL wsrep_on = OFF;
+connect node_1b, 127.0.0.1, root, , test, $NODE_MYPORT_1;;
+connection node_1b;
+SHOW SESSION VARIABLES LIKE 'wsrep_on';
+Variable_name Value
+wsrep_on OFF
+SHOW GLOBAL VARIABLES LIKE 'wsrep_on';
+Variable_name Value
+wsrep_on OFF
+CREATE TABLE t2 (f1 INTEGER);
+DROP TABLE t2;
+SET GLOBAL wsrep_on = ON;
+SHOW SESSION VARIABLES LIKE 'wsrep_on';
+Variable_name Value
+wsrep_on ON
+disconnect node_1b;
+connection node_1;
+SET GLOBAL wsrep_on = OFF;
+SET SESSION wsrep_on = ON;
+ERROR HY000: Can't enable @@session.wsrep_on, while @@global.wsrep_on is disabled
+SET GLOBAL wsrep_on = ON;
+SET SESSION wsrep_on = ON;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY);
+SET GLOBAL wsrep_on = OFF;
+connect node_1b, 127.0.0.1, root, , test, $NODE_MYPORT_1;;
+connection node_1b;
+SHOW SESSION VARIABLES LIKE 'wsrep_on';
+Variable_name Value
+wsrep_on OFF
+SHOW GLOBAL VARIABLES LIKE 'wsrep_on';
+Variable_name Value
+wsrep_on OFF
+SET GLOBAL wsrep_on = ON;
+START TRANSACTION;
+INSERT INTO t1 VALUES(1);
+COMMIT;
+SELECT * FROM t1;
+f1
+1
+connection node_2;
+SELECT * FROM t1;
+f1
+1
+DROP TABLE t1;
+connection node_1;
+SET SESSION wsrep_on = OFF;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY);
+INSERT INTO t1 VALUES (1);
+START TRANSACTION;
+INSERT INTO t1 VALUES (2);
+COMMIT;
+DROP TABLE t1;
+connection node_2;
+SHOW TABLES;
+Tables_in_test
+connection node_1;
+SET SESSION wsrep_on = ON;
+SET GLOBAL wsrep_on = OFF;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY);
+INSERT INTO t1 VALUES (1);
+START TRANSACTION;
+INSERT INTO t1 VALUES (2);
+COMMIT;
+connection node_2;
+SHOW TABLES;
+Tables_in_test
+connection node_1;
+DROP TABLE t1;
+SET GLOBAL wsrep_on = ON;
diff --git a/mysql-test/suite/galera/r/galera_var_wsrep_provider_options.result b/mysql-test/suite/galera/r/galera_var_wsrep_provider_options.result
new file mode 100644
index 00000000000..5ef4a23fd90
--- /dev/null
+++ b/mysql-test/suite/galera/r/galera_var_wsrep_provider_options.result
@@ -0,0 +1,10 @@
+connection node_2;
+connection node_1;
+call mtr.add_suppression("WSREP: Unknown parameter 'a'");
+call mtr.add_suppression("WSREP: Set options returned 7");
+SET GLOBAL wsrep_provider_options=NULL;
+ERROR HY000: Incorrect arguments to SET
+SET GLOBAL wsrep_provider_options='';
+SET GLOBAL wsrep_provider_options=' ';
+SET GLOBAL wsrep_provider_options='a=1';
+ERROR HY000: Incorrect arguments to SET
diff --git a/mysql-test/suite/sys_vars/r/wsrep_start_position_basic.result b/mysql-test/suite/galera/r/galera_var_wsrep_start_position.result
index a49e6135d47..3d409f90eac 100644
--- a/mysql-test/suite/sys_vars/r/wsrep_start_position_basic.result
+++ b/mysql-test/suite/galera/r/galera_var_wsrep_start_position.result
@@ -1,7 +1,9 @@
+connection node_2;
+connection node_1;
#
# wsrep_start_position
#
-# save the initial value
+CALL mtr.add_suppression("WSREP: SST failed for position .*");
SET @wsrep_start_position_global_saved = @@global.wsrep_start_position;
# default
SELECT @@global.wsrep_start_position;
@@ -11,46 +13,92 @@ SELECT @@global.wsrep_start_position;
# scope
SELECT @@session.wsrep_start_position;
ERROR HY000: Variable 'wsrep_start_position' is a GLOBAL variable
-SET @@global.wsrep_start_position='00000000-0000-0000-0000-000000000000:-1';
SELECT @@global.wsrep_start_position;
@@global.wsrep_start_position
00000000-0000-0000-0000-000000000000:-1
# valid values
-SET @@global.wsrep_start_position='00000000-0000-0000-0000-000000000000:-2';
+SET @@global.wsrep_start_position='00000000-0000-0000-0000-000000000000:-1';
+SELECT @@global.wsrep_start_position;
+@@global.wsrep_start_position
+00000000-0000-0000-0000-000000000000:-1
+
+SET @@global.wsrep_start_position='00000000-0000-0000-0000-000000000000:0';
+ERROR 42000: Variable 'wsrep_start_position' can't be set to the value of '00000000-0000-0000-0000-000000000000:0'
SELECT @@global.wsrep_start_position;
@@global.wsrep_start_position
-00000000-0000-0000-0000-000000000000:-2
+00000000-0000-0000-0000-000000000000:-1
+# invalid values
SET @@global.wsrep_start_position='12345678-1234-1234-1234-123456789012:100';
+ERROR 42000: Variable 'wsrep_start_position' can't be set to the value of '12345678-1234-1234-1234-123456789012:100'
SELECT @@global.wsrep_start_position;
@@global.wsrep_start_position
-12345678-1234-1234-1234-123456789012:100
-SET @@global.wsrep_start_position=default;
+00000000-0000-0000-0000-000000000000:-1
+SET @@global.wsrep_start_position='00000000-0000-0000-0000-000000000000:-2';
+ERROR 42000: Variable 'wsrep_start_position' can't be set to the value of '00000000-0000-0000-0000-000000000000:-2'
+SELECT @@global.wsrep_start_position;
+@@global.wsrep_start_position
+00000000-0000-0000-0000-000000000000:-1
+SET @@global.wsrep_start_position='00000000-0000-0000-0000-000000000000:-2A';
+ERROR 42000: Variable 'wsrep_start_position' can't be set to the value of '00000000-0000-0000-0000-000000000000:-2A'
+SELECT @@global.wsrep_start_position;
+@@global.wsrep_start_position
+00000000-0000-0000-0000-000000000000:-1
+SET @@global.wsrep_start_position='00000000-0000-0000-0000-000000000000:0A';
+ERROR 42000: Variable 'wsrep_start_position' can't be set to the value of '00000000-0000-0000-0000-000000000000:0A'
SELECT @@global.wsrep_start_position;
@@global.wsrep_start_position
00000000-0000-0000-0000-000000000000:-1
-
-# invalid values
SET @@global.wsrep_start_position='000000000000000-0000-0000-0000-000000000000:-1';
ERROR 42000: Variable 'wsrep_start_position' can't be set to the value of '000000000000000-0000-0000-0000-000000000000:-1'
+SELECT @@global.wsrep_start_position;
+@@global.wsrep_start_position
+00000000-0000-0000-0000-000000000000:-1
SET @@global.wsrep_start_position='12345678-1234-1234-12345-123456789012:100';
ERROR 42000: Variable 'wsrep_start_position' can't be set to the value of '12345678-1234-1234-12345-123456789012:100'
+SELECT @@global.wsrep_start_position;
+@@global.wsrep_start_position
+00000000-0000-0000-0000-000000000000:-1
SET @@global.wsrep_start_position='12345678-1234-123-12345-123456789012:0';
ERROR 42000: Variable 'wsrep_start_position' can't be set to the value of '12345678-1234-123-12345-123456789012:0'
+SELECT @@global.wsrep_start_position;
+@@global.wsrep_start_position
+00000000-0000-0000-0000-000000000000:-1
SET @@global.wsrep_start_position='12345678-1234-1234-1234-123456789012:_99999';
ERROR 42000: Variable 'wsrep_start_position' can't be set to the value of '12345678-1234-1234-1234-123456789012:_99999'
+SELECT @@global.wsrep_start_position;
+@@global.wsrep_start_position
+00000000-0000-0000-0000-000000000000:-1
SET @@global.wsrep_start_position='12345678-1234-1234-1234-123456789012:a';
ERROR 42000: Variable 'wsrep_start_position' can't be set to the value of '12345678-1234-1234-1234-123456789012:a'
+SELECT @@global.wsrep_start_position;
+@@global.wsrep_start_position
+00000000-0000-0000-0000-000000000000:-1
SET @@global.wsrep_start_position='OFF';
ERROR 42000: Variable 'wsrep_start_position' can't be set to the value of 'OFF'
+SELECT @@global.wsrep_start_position;
+@@global.wsrep_start_position
+00000000-0000-0000-0000-000000000000:-1
SET @@global.wsrep_start_position=ON;
ERROR 42000: Variable 'wsrep_start_position' can't be set to the value of 'ON'
+SELECT @@global.wsrep_start_position;
+@@global.wsrep_start_position
+00000000-0000-0000-0000-000000000000:-1
SET @@global.wsrep_start_position='';
ERROR 42000: Variable 'wsrep_start_position' can't be set to the value of ''
+SELECT @@global.wsrep_start_position;
+@@global.wsrep_start_position
+00000000-0000-0000-0000-000000000000:-1
SET @@global.wsrep_start_position=NULL;
ERROR 42000: Variable 'wsrep_start_position' can't be set to the value of 'NULL'
+SELECT @@global.wsrep_start_position;
+@@global.wsrep_start_position
+00000000-0000-0000-0000-000000000000:-1
SET @@global.wsrep_start_position='junk';
ERROR 42000: Variable 'wsrep_start_position' can't be set to the value of 'junk'
+SELECT @@global.wsrep_start_position;
+@@global.wsrep_start_position
+00000000-0000-0000-0000-000000000000:-1
# restore the initial value
SET @@global.wsrep_start_position = @wsrep_start_position_global_saved;
diff --git a/mysql-test/suite/galera/r/galera_virtual_blob.result b/mysql-test/suite/galera/r/galera_virtual_blob.result
new file mode 100644
index 00000000000..fd1c84f7083
--- /dev/null
+++ b/mysql-test/suite/galera/r/galera_virtual_blob.result
@@ -0,0 +1,21 @@
+connection node_2;
+connection node_1;
+CREATE TABLE t (f INT GENERATED ALWAYS AS (a+b)VIRTUAL,a INT,b INT,h BLOB);
+INSERT INTO t (a,b)VALUES(0,0), (0,0), (0,0), (0,0), (0,0);
+SELECT * from t;
+f a b h
+0 0 0 NULL
+0 0 0 NULL
+0 0 0 NULL
+0 0 0 NULL
+0 0 0 NULL
+connection node_2;
+SELECT * from t;
+f a b h
+0 0 0 NULL
+0 0 0 NULL
+0 0 0 NULL
+0 0 0 NULL
+0 0 0 NULL
+connection node_1;
+DROP TABLE t;
diff --git a/mysql-test/suite/galera/r/galera_virtual_column.result b/mysql-test/suite/galera/r/galera_virtual_column.result
new file mode 100644
index 00000000000..71820ed8225
--- /dev/null
+++ b/mysql-test/suite/galera/r/galera_virtual_column.result
@@ -0,0 +1,19 @@
+connection node_2;
+connection node_1;
+connection node_1;
+CREATE TABLE p (id INT UNSIGNED PRIMARY KEY AUTO_INCREMENT) ENGINE = InnoDB;
+CREATE TABLE c (id INT UNSIGNED PRIMARY KEY AUTO_INCREMENT, pid INT UNSIGNED, bitmap TINYINT UNSIGNED NOT NULL DEFAULT 0, bitmap5 TINYINT UNSIGNED GENERATED ALWAYS AS (bitmap&(1<<5)) VIRTUAL, FOREIGN KEY (pid) REFERENCES p (id) ON DELETE CASCADE ON UPDATE CASCADE);
+CREATE INDEX bitmap5 ON c(bitmap5) USING BTREE;
+INSERT INTO p VALUES(1);
+INSERT INTO c(pid) VALUES(1);
+connection node_2;
+connection node_1;
+DELETE FROM p WHERE id=1;
+SELECT * FROM p;
+id
+SELECT * FROM c;
+id pid bitmap bitmap5
+connection node_2;
+connection node_1;
+DROP TABLE c;
+DROP TABLE p;
diff --git a/mysql-test/suite/galera/r/lp1376747-4.result b/mysql-test/suite/galera/r/lp1376747-4.result
index 6bbc24309ad..888961b592d 100644
--- a/mysql-test/suite/galera/r/lp1376747-4.result
+++ b/mysql-test/suite/galera/r/lp1376747-4.result
@@ -5,32 +5,34 @@ CREATE TABLE t1 (id INT PRIMARY KEY) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1);
connection node_2;
SET session wsrep_sync_wait=0;
-FLUSH TABLE WITH READ LOCK;
+FLUSH TABLES WITH READ LOCK;
connection node_1;
ALTER TABLE t1 ADD COLUMN f2 INTEGER;
INSERT INTO t1 VALUES (2,3);
connection node_2a;
SET session wsrep_sync_wait=0;
-# node_1 DDL should not yet be applied
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`id` int(11) NOT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1
+SET debug_sync='flush_tables_with_read_lock_after_acquire_locks SIGNAL parked2 WAIT_FOR go2';
FLUSH TABLES t1 WITH READ LOCK;;
connection node_2;
+SET debug_sync='now WAIT_FOR parked2';
+SET debug_sync='now SIGNAL go2';
UNLOCK TABLES;
-# node_1 DDL should not yet be applied 2
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`id` int(11) NOT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1
+set debug_sync= 'RESET';
connection node_2a;
UNLOCK TABLES;
-# node_1 DDL should be applied 2
+SET SESSION wsrep_sync_wait = DEFAULT;
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
diff --git a/mysql-test/suite/galera/r/mysql-wsrep#33,debug.rdiff b/mysql-test/suite/galera/r/mysql-wsrep#33,debug.rdiff
index d8fed3897fd..971f11bb8ec 100644
--- a/mysql-test/suite/galera/r/mysql-wsrep#33,debug.rdiff
+++ b/mysql-test/suite/galera/r/mysql-wsrep#33,debug.rdiff
@@ -1,27 +1,27 @@
---- r/mysql-wsrep#33.result 2020-02-13 15:14:31.871914684 +0200
-+++ r/mysql-wsrep#33.reject 2020-02-13 17:01:22.559450367 +0200
-@@ -395,6 +395,114 @@
+--- r/mysql-wsrep#33.result 2021-04-10 14:36:42.663191908 +0300
++++ r/mysql-wsrep#33,debug.reject 2021-04-10 15:43:02.420168969 +0300
+@@ -698,6 +698,190 @@
+ 1
DROP TABLE t1;
COMMIT;
- SET AUTOCOMMIT=ON;
+Performing State Transfer on a server that has been killed and restarted
+while a DDL was in progress on it
+connection node_1;
-+CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB;
++CREATE TABLE t1 (id int not null primary key,f1 CHAR(255)) ENGINE=InnoDB;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
-+INSERT INTO t1 VALUES ('node1_committed_before');
-+INSERT INTO t1 VALUES ('node1_committed_before');
-+INSERT INTO t1 VALUES ('node1_committed_before');
-+INSERT INTO t1 VALUES ('node1_committed_before');
-+INSERT INTO t1 VALUES ('node1_committed_before');
++INSERT INTO t1 VALUES (1,'node1_committed_before');
++INSERT INTO t1 VALUES (2,'node1_committed_before');
++INSERT INTO t1 VALUES (3,'node1_committed_before');
++INSERT INTO t1 VALUES (4,'node1_committed_before');
++INSERT INTO t1 VALUES (5,'node1_committed_before');
+connection node_2;
+START TRANSACTION;
-+INSERT INTO t1 VALUES ('node2_committed_before');
-+INSERT INTO t1 VALUES ('node2_committed_before');
-+INSERT INTO t1 VALUES ('node2_committed_before');
-+INSERT INTO t1 VALUES ('node2_committed_before');
-+INSERT INTO t1 VALUES ('node2_committed_before');
++INSERT INTO t1 VALUES (6,'node2_committed_before');
++INSERT INTO t1 VALUES (7,'node2_committed_before');
++INSERT INTO t1 VALUES (8,'node2_committed_before');
++INSERT INTO t1 VALUES (9,'node2_committed_before');
++INSERT INTO t1 VALUES (10,'node2_committed_before');
+COMMIT;
+SET GLOBAL debug_dbug = 'd,sync.alter_opened_table';
+connection node_1;
@@ -32,26 +32,26 @@
+connection node_1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
-+INSERT INTO t1 (f1) VALUES ('node1_committed_during');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_during');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_during');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_during');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_during');
++INSERT INTO t1 (id,f1) VALUES (11,'node1_committed_during');
++INSERT INTO t1 (id,f1) VALUES (12,'node1_committed_during');
++INSERT INTO t1 (id,f1) VALUES (13,'node1_committed_during');
++INSERT INTO t1 (id,f1) VALUES (14,'node1_committed_during');
++INSERT INTO t1 (id,f1) VALUES (15,'node1_committed_during');
+COMMIT;
+START TRANSACTION;
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (16,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (17,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (18,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (19,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (20,'node1_to_be_committed_after');
+connect node_1a_galera_st_kill_slave_ddl, 127.0.0.1, root, , test, $NODE_MYPORT_1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (21,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (22,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (23,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (24,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (25,'node1_to_be_rollbacked_after');
+connection node_2;
+Performing --wsrep-recover ...
+connection node_2;
@@ -59,58 +59,134 @@
+Using --wsrep-start-position when starting mysqld ...
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
-+INSERT INTO t1 (f1) VALUES ('node2_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node2_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node2_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node2_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node2_committed_after');
++INSERT INTO t1 (id,f1) VALUES (26,'node2_committed_after');
++INSERT INTO t1 (id,f1) VALUES (27,'node2_committed_after');
++INSERT INTO t1 (id,f1) VALUES (28,'node2_committed_after');
++INSERT INTO t1 (id,f1) VALUES (29,'node2_committed_after');
++INSERT INTO t1 (id,f1) VALUES (30,'node2_committed_after');
+COMMIT;
+connection node_1;
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (31,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (32,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (33,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (34,'node1_to_be_committed_after');
++INSERT INTO t1 (id,f1) VALUES (35,'node1_to_be_committed_after');
+COMMIT;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
-+INSERT INTO t1 (f1) VALUES ('node1_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_after');
-+INSERT INTO t1 (f1) VALUES ('node1_committed_after');
++INSERT INTO t1 (id,f1) VALUES (36,'node1_committed_after');
++INSERT INTO t1 (id,f1) VALUES (37,'node1_committed_after');
++INSERT INTO t1 (id,f1) VALUES (38,'node1_committed_after');
++INSERT INTO t1 (id,f1) VALUES (39,'node1_committed_after');
++INSERT INTO t1 (id,f1) VALUES (40,'node1_committed_after');
+COMMIT;
+connection node_1a_galera_st_kill_slave_ddl;
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
-+INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (41,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (42,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (43,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (44,'node1_to_be_rollbacked_after');
++INSERT INTO t1 (id,f1) VALUES (45,'node1_to_be_rollbacked_after');
+ROLLBACK;
-+SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1';
-+COUNT(*) = 2
-+1
-+SELECT COUNT(*) = 35 FROM t1;
-+COUNT(*) = 35
-+1
++SET AUTOCOMMIT=ON;
++SET SESSION wsrep_sync_wait=15;
++SELECT COUNT(*) AS EXPECT_3 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1';
++EXPECT_3
++3
++SELECT COUNT(*) AS EXPECT_35 FROM t1;
++EXPECT_35
++35
++SELECT * FROM t1;
++id f1 f2
++1 node1_committed_before NULL
++2 node1_committed_before NULL
++3 node1_committed_before NULL
++4 node1_committed_before NULL
++5 node1_committed_before NULL
++6 node2_committed_before NULL
++7 node2_committed_before NULL
++8 node2_committed_before NULL
++9 node2_committed_before NULL
++10 node2_committed_before NULL
++11 node1_committed_during NULL
++12 node1_committed_during NULL
++13 node1_committed_during NULL
++14 node1_committed_during NULL
++15 node1_committed_during NULL
++16 node1_to_be_committed_after NULL
++17 node1_to_be_committed_after NULL
++18 node1_to_be_committed_after NULL
++19 node1_to_be_committed_after NULL
++20 node1_to_be_committed_after NULL
++26 node2_committed_after NULL
++27 node2_committed_after NULL
++28 node2_committed_after NULL
++29 node2_committed_after NULL
++30 node2_committed_after NULL
++31 node1_to_be_committed_after NULL
++32 node1_to_be_committed_after NULL
++33 node1_to_be_committed_after NULL
++34 node1_to_be_committed_after NULL
++35 node1_to_be_committed_after NULL
++36 node1_committed_after NULL
++37 node1_committed_after NULL
++38 node1_committed_after NULL
++39 node1_committed_after NULL
++40 node1_committed_after NULL
+SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
+COUNT(*) = 0
+1
+COMMIT;
-+SET AUTOCOMMIT=ON;
+connection node_1;
-+SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1';
-+COUNT(*) = 2
-+1
-+SELECT COUNT(*) = 35 FROM t1;
-+COUNT(*) = 35
-+1
++SET AUTOCOMMIT=ON;
++SET SESSION wsrep_sync_wait=15;
++SELECT COUNT(*) AS EXPECT_3 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1';
++EXPECT_3
++3
++SELECT COUNT(*) AS EXPECT_35 FROM t1;
++EXPECT_35
++35
++SELECT * FROM t1;
++id f1 f2
++1 node1_committed_before NULL
++2 node1_committed_before NULL
++3 node1_committed_before NULL
++4 node1_committed_before NULL
++5 node1_committed_before NULL
++6 node2_committed_before NULL
++7 node2_committed_before NULL
++8 node2_committed_before NULL
++9 node2_committed_before NULL
++10 node2_committed_before NULL
++11 node1_committed_during NULL
++12 node1_committed_during NULL
++13 node1_committed_during NULL
++14 node1_committed_during NULL
++15 node1_committed_during NULL
++16 node1_to_be_committed_after NULL
++17 node1_to_be_committed_after NULL
++18 node1_to_be_committed_after NULL
++19 node1_to_be_committed_after NULL
++20 node1_to_be_committed_after NULL
++26 node2_committed_after NULL
++27 node2_committed_after NULL
++28 node2_committed_after NULL
++29 node2_committed_after NULL
++30 node2_committed_after NULL
++31 node1_to_be_committed_after NULL
++32 node1_to_be_committed_after NULL
++33 node1_to_be_committed_after NULL
++34 node1_to_be_committed_after NULL
++35 node1_to_be_committed_after NULL
++36 node1_committed_after NULL
++37 node1_committed_after NULL
++38 node1_committed_after NULL
++39 node1_committed_after NULL
++40 node1_committed_after NULL
+SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
+COUNT(*) = 0
+1
+DROP TABLE t1;
+COMMIT;
-+SET AUTOCOMMIT=ON;
+SET GLOBAL debug_dbug = $debug_orig;
connection node_2;
connection node_1;
diff --git a/mysql-test/suite/galera/r/mysql-wsrep#33.result b/mysql-test/suite/galera/r/mysql-wsrep#33.result
index fb0b593cc96..2c116347fca 100644
--- a/mysql-test/suite/galera/r/mysql-wsrep#33.result
+++ b/mysql-test/suite/galera/r/mysql-wsrep#33.result
@@ -13,49 +13,49 @@ connection node_2;
SET GLOBAL wsrep_sst_method = 'mysqldump';
Performing State Transfer on a server that has been temporarily disconnected
connection node_1;
-CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB;
+CREATE TABLE t1 (id int not null primary key,f1 CHAR(255)) ENGINE=InnoDB;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
+INSERT INTO t1 VALUES (1,'node1_committed_before');
+INSERT INTO t1 VALUES (2,'node1_committed_before');
+INSERT INTO t1 VALUES (3,'node1_committed_before');
+INSERT INTO t1 VALUES (4,'node1_committed_before');
+INSERT INTO t1 VALUES (5,'node1_committed_before');
COMMIT;
connection node_2;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
+INSERT INTO t1 VALUES (6,'node2_committed_before');
+INSERT INTO t1 VALUES (7,'node2_committed_before');
+INSERT INTO t1 VALUES (8,'node2_committed_before');
+INSERT INTO t1 VALUES (9,'node2_committed_before');
+INSERT INTO t1 VALUES (10,'node2_committed_before');
COMMIT;
Unloading wsrep provider ...
-SET GLOBAL wsrep_provider = 'none';
+SET GLOBAL wsrep_cluster_address = '';
connection node_1;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
+INSERT INTO t1 VALUES (11,'node1_committed_during');
+INSERT INTO t1 VALUES (12,'node1_committed_during');
+INSERT INTO t1 VALUES (13,'node1_committed_during');
+INSERT INTO t1 VALUES (14,'node1_committed_during');
+INSERT INTO t1 VALUES (15,'node1_committed_during');
COMMIT;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (16,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (17,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (18,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (19,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (20,'node1_to_be_committed_after');
connect node_1a_galera_st_disconnect_slave, 127.0.0.1, root, , test, $NODE_MYPORT_1;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (21,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (22,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (23,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (24,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (25,'node1_to_be_rollbacked_after');
connection node_2;
Loading wsrep provider ...
disconnect node_2;
@@ -63,338 +63,641 @@ connect node_2, 127.0.0.1, root, , test, $NODE_MYPORT_2;
connection node_2;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
+INSERT INTO t1 VALUES (26,'node2_committed_after');
+INSERT INTO t1 VALUES (27,'node2_committed_after');
+INSERT INTO t1 VALUES (28,'node2_committed_after');
+INSERT INTO t1 VALUES (29,'node2_committed_after');
+INSERT INTO t1 VALUES (30,'node2_committed_after');
COMMIT;
connection node_1;
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (31,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (32,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (33,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (34,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (35,'node1_to_be_committed_after');
COMMIT;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
+INSERT INTO t1 VALUES (36,'node1_committed_after');
+INSERT INTO t1 VALUES (37,'node1_committed_after');
+INSERT INTO t1 VALUES (38,'node1_committed_after');
+INSERT INTO t1 VALUES (39,'node1_committed_after');
+INSERT INTO t1 VALUES (40,'node1_committed_after');
COMMIT;
connection node_1a_galera_st_disconnect_slave;
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (41,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (42,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (43,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (44,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (45,'node1_to_be_rollbacked_after');
ROLLBACK;
-SELECT COUNT(*) = 35 FROM t1;
-COUNT(*) = 35
-1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_35 FROM t1;
+EXPECT_35
+35
+SELECT * FROM t1;
+id f1
+1 node1_committed_before
+2 node1_committed_before
+3 node1_committed_before
+4 node1_committed_before
+5 node1_committed_before
+6 node2_committed_before
+7 node2_committed_before
+8 node2_committed_before
+9 node2_committed_before
+10 node2_committed_before
+11 node1_committed_during
+12 node1_committed_during
+13 node1_committed_during
+14 node1_committed_during
+15 node1_committed_during
+16 node1_to_be_committed_after
+17 node1_to_be_committed_after
+18 node1_to_be_committed_after
+19 node1_to_be_committed_after
+20 node1_to_be_committed_after
+26 node2_committed_after
+27 node2_committed_after
+28 node2_committed_after
+29 node2_committed_after
+30 node2_committed_after
+31 node1_to_be_committed_after
+32 node1_to_be_committed_after
+33 node1_to_be_committed_after
+34 node1_to_be_committed_after
+35 node1_to_be_committed_after
+36 node1_committed_after
+37 node1_committed_after
+38 node1_committed_after
+39 node1_committed_after
+40 node1_committed_after
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
COUNT(*) = 0
1
-COMMIT;
-SET AUTOCOMMIT=ON;
connection node_1;
-SELECT COUNT(*) = 35 FROM t1;
-COUNT(*) = 35
-1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_35 FROM t1;
+EXPECT_35
+35
+SELECT * FROM t1;
+id f1
+1 node1_committed_before
+2 node1_committed_before
+3 node1_committed_before
+4 node1_committed_before
+5 node1_committed_before
+6 node2_committed_before
+7 node2_committed_before
+8 node2_committed_before
+9 node2_committed_before
+10 node2_committed_before
+11 node1_committed_during
+12 node1_committed_during
+13 node1_committed_during
+14 node1_committed_during
+15 node1_committed_during
+16 node1_to_be_committed_after
+17 node1_to_be_committed_after
+18 node1_to_be_committed_after
+19 node1_to_be_committed_after
+20 node1_to_be_committed_after
+26 node2_committed_after
+27 node2_committed_after
+28 node2_committed_after
+29 node2_committed_after
+30 node2_committed_after
+31 node1_to_be_committed_after
+32 node1_to_be_committed_after
+33 node1_to_be_committed_after
+34 node1_to_be_committed_after
+35 node1_to_be_committed_after
+36 node1_committed_after
+37 node1_committed_after
+38 node1_committed_after
+39 node1_committed_after
+40 node1_committed_after
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
COUNT(*) = 0
1
DROP TABLE t1;
COMMIT;
-SET AUTOCOMMIT=ON;
Performing State Transfer on a server that has been shut down cleanly and restarted
connection node_1;
-CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB;
+CREATE TABLE t1 (id int not null primary key,f1 CHAR(255)) ENGINE=InnoDB;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
+INSERT INTO t1 VALUES (1,'node1_committed_before');
+INSERT INTO t1 VALUES (2,'node1_committed_before');
+INSERT INTO t1 VALUES (3,'node1_committed_before');
+INSERT INTO t1 VALUES (4,'node1_committed_before');
+INSERT INTO t1 VALUES (5,'node1_committed_before');
COMMIT;
connection node_2;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
+INSERT INTO t1 VALUES (6,'node2_committed_before');
+INSERT INTO t1 VALUES (7,'node2_committed_before');
+INSERT INTO t1 VALUES (8,'node2_committed_before');
+INSERT INTO t1 VALUES (9,'node2_committed_before');
+INSERT INTO t1 VALUES (10,'node2_committed_before');
COMMIT;
Shutting down server ...
connection node_1;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
+INSERT INTO t1 VALUES (11,'node1_committed_during');
+INSERT INTO t1 VALUES (12,'node1_committed_during');
+INSERT INTO t1 VALUES (13,'node1_committed_during');
+INSERT INTO t1 VALUES (14,'node1_committed_during');
+INSERT INTO t1 VALUES (15,'node1_committed_during');
COMMIT;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (16,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (17,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (18,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (19,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (20,'node1_to_be_committed_after');
connect node_1a_galera_st_shutdown_slave, 127.0.0.1, root, , test, $NODE_MYPORT_1;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (21,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (22,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (23,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (24,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (25,'node1_to_be_rollbacked_after');
connection node_2;
Starting server ...
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
+INSERT INTO t1 VALUES (26,'node2_committed_after');
+INSERT INTO t1 VALUES (27,'node2_committed_after');
+INSERT INTO t1 VALUES (28,'node2_committed_after');
+INSERT INTO t1 VALUES (29,'node2_committed_after');
+INSERT INTO t1 VALUES (30,'node2_committed_after');
COMMIT;
connection node_1;
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (31,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (32,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (33,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (34,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (35,'node1_to_be_committed_after');
COMMIT;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
+INSERT INTO t1 VALUES (36,'node1_committed_after');
+INSERT INTO t1 VALUES (37,'node1_committed_after');
+INSERT INTO t1 VALUES (38,'node1_committed_after');
+INSERT INTO t1 VALUES (39,'node1_committed_after');
+INSERT INTO t1 VALUES (40,'node1_committed_after');
COMMIT;
connection node_1a_galera_st_shutdown_slave;
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (41,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (42,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (43,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (44,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (45,'node1_to_be_rollbacked_after');
ROLLBACK;
-SELECT COUNT(*) = 35 FROM t1;
-COUNT(*) = 35
-1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_35 FROM t1;
+EXPECT_35
+35
+SELECT * from t1;
+id f1
+1 node1_committed_before
+2 node1_committed_before
+3 node1_committed_before
+4 node1_committed_before
+5 node1_committed_before
+6 node2_committed_before
+7 node2_committed_before
+8 node2_committed_before
+9 node2_committed_before
+10 node2_committed_before
+11 node1_committed_during
+12 node1_committed_during
+13 node1_committed_during
+14 node1_committed_during
+15 node1_committed_during
+16 node1_to_be_committed_after
+17 node1_to_be_committed_after
+18 node1_to_be_committed_after
+19 node1_to_be_committed_after
+20 node1_to_be_committed_after
+26 node2_committed_after
+27 node2_committed_after
+28 node2_committed_after
+29 node2_committed_after
+30 node2_committed_after
+31 node1_to_be_committed_after
+32 node1_to_be_committed_after
+33 node1_to_be_committed_after
+34 node1_to_be_committed_after
+35 node1_to_be_committed_after
+36 node1_committed_after
+37 node1_committed_after
+38 node1_committed_after
+39 node1_committed_after
+40 node1_committed_after
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
COUNT(*) = 0
1
COMMIT;
-SET AUTOCOMMIT=ON;
connection node_1;
-SELECT COUNT(*) = 35 FROM t1;
-COUNT(*) = 35
-1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_35 FROM t1;
+EXPECT_35
+35
+SELECT * from t1;
+id f1
+1 node1_committed_before
+2 node1_committed_before
+3 node1_committed_before
+4 node1_committed_before
+5 node1_committed_before
+6 node2_committed_before
+7 node2_committed_before
+8 node2_committed_before
+9 node2_committed_before
+10 node2_committed_before
+11 node1_committed_during
+12 node1_committed_during
+13 node1_committed_during
+14 node1_committed_during
+15 node1_committed_during
+16 node1_to_be_committed_after
+17 node1_to_be_committed_after
+18 node1_to_be_committed_after
+19 node1_to_be_committed_after
+20 node1_to_be_committed_after
+26 node2_committed_after
+27 node2_committed_after
+28 node2_committed_after
+29 node2_committed_after
+30 node2_committed_after
+31 node1_to_be_committed_after
+32 node1_to_be_committed_after
+33 node1_to_be_committed_after
+34 node1_to_be_committed_after
+35 node1_to_be_committed_after
+36 node1_committed_after
+37 node1_committed_after
+38 node1_committed_after
+39 node1_committed_after
+40 node1_committed_after
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
COUNT(*) = 0
1
DROP TABLE t1;
COMMIT;
-SET AUTOCOMMIT=ON;
Performing State Transfer on a server that starts from a clean var directory
This is accomplished by shutting down node #2 and removing its var directory before restarting it
connection node_1;
-CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB;
+CREATE TABLE t1 (id int not null primary key,f1 CHAR(255)) ENGINE=InnoDB;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
+INSERT INTO t1 VALUES (1,'node1_committed_before');
+INSERT INTO t1 VALUES (2,'node1_committed_before');
+INSERT INTO t1 VALUES (3,'node1_committed_before');
+INSERT INTO t1 VALUES (4,'node1_committed_before');
+INSERT INTO t1 VALUES (5,'node1_committed_before');
COMMIT;
connection node_2;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
+INSERT INTO t1 VALUES (6,'node2_committed_before');
+INSERT INTO t1 VALUES (7,'node2_committed_before');
+INSERT INTO t1 VALUES (8,'node2_committed_before');
+INSERT INTO t1 VALUES (9,'node2_committed_before');
+INSERT INTO t1 VALUES (10,'node2_committed_before');
COMMIT;
Shutting down server ...
connection node_1;
Cleaning var directory ...
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
+INSERT INTO t1 VALUES (11,'node1_committed_during');
+INSERT INTO t1 VALUES (12,'node1_committed_during');
+INSERT INTO t1 VALUES (13,'node1_committed_during');
+INSERT INTO t1 VALUES (14,'node1_committed_during');
+INSERT INTO t1 VALUES (15,'node1_committed_during');
COMMIT;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (16,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (17,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (18,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (19,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (20,'node1_to_be_committed_after');
connect node_1a_galera_st_clean_slave, 127.0.0.1, root, , test, $NODE_MYPORT_1;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (21,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (22,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (23,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (24,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (25,'node1_to_be_rollbacked_after');
connection node_2;
Starting server ...
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
+INSERT INTO t1 VALUES (26,'node2_committed_after');
+INSERT INTO t1 VALUES (27,'node2_committed_after');
+INSERT INTO t1 VALUES (28,'node2_committed_after');
+INSERT INTO t1 VALUES (29,'node2_committed_after');
+INSERT INTO t1 VALUES (30,'node2_committed_after');
COMMIT;
connection node_1;
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (31,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (32,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (33,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (34,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (35,'node1_to_be_committed_after');
COMMIT;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
+INSERT INTO t1 VALUES (36,'node1_committed_after');
+INSERT INTO t1 VALUES (37,'node1_committed_after');
+INSERT INTO t1 VALUES (38,'node1_committed_after');
+INSERT INTO t1 VALUES (39,'node1_committed_after');
+INSERT INTO t1 VALUES (40,'node1_committed_after');
COMMIT;
connection node_1a_galera_st_clean_slave;
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (41,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (42,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (43,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (44,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (45,'node1_to_be_rollbacked_after');
ROLLBACK;
-SELECT COUNT(*) = 35 FROM t1;
-COUNT(*) = 35
-1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_35 FROM t1;
+EXPECT_35
+35
+SELECT * from t1;
+id f1
+1 node1_committed_before
+2 node1_committed_before
+3 node1_committed_before
+4 node1_committed_before
+5 node1_committed_before
+6 node2_committed_before
+7 node2_committed_before
+8 node2_committed_before
+9 node2_committed_before
+10 node2_committed_before
+11 node1_committed_during
+12 node1_committed_during
+13 node1_committed_during
+14 node1_committed_during
+15 node1_committed_during
+16 node1_to_be_committed_after
+17 node1_to_be_committed_after
+18 node1_to_be_committed_after
+19 node1_to_be_committed_after
+20 node1_to_be_committed_after
+26 node2_committed_after
+27 node2_committed_after
+28 node2_committed_after
+29 node2_committed_after
+30 node2_committed_after
+31 node1_to_be_committed_after
+32 node1_to_be_committed_after
+33 node1_to_be_committed_after
+34 node1_to_be_committed_after
+35 node1_to_be_committed_after
+36 node1_committed_after
+37 node1_committed_after
+38 node1_committed_after
+39 node1_committed_after
+40 node1_committed_after
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
COUNT(*) = 0
1
COMMIT;
-SET AUTOCOMMIT=ON;
connection node_1;
-SELECT COUNT(*) = 35 FROM t1;
-COUNT(*) = 35
-1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_35 FROM t1;
+EXPECT_35
+35
+SELECT * from t1;
+id f1
+1 node1_committed_before
+2 node1_committed_before
+3 node1_committed_before
+4 node1_committed_before
+5 node1_committed_before
+6 node2_committed_before
+7 node2_committed_before
+8 node2_committed_before
+9 node2_committed_before
+10 node2_committed_before
+11 node1_committed_during
+12 node1_committed_during
+13 node1_committed_during
+14 node1_committed_during
+15 node1_committed_during
+16 node1_to_be_committed_after
+17 node1_to_be_committed_after
+18 node1_to_be_committed_after
+19 node1_to_be_committed_after
+20 node1_to_be_committed_after
+26 node2_committed_after
+27 node2_committed_after
+28 node2_committed_after
+29 node2_committed_after
+30 node2_committed_after
+31 node1_to_be_committed_after
+32 node1_to_be_committed_after
+33 node1_to_be_committed_after
+34 node1_to_be_committed_after
+35 node1_to_be_committed_after
+36 node1_committed_after
+37 node1_committed_after
+38 node1_committed_after
+39 node1_committed_after
+40 node1_committed_after
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
COUNT(*) = 0
1
DROP TABLE t1;
COMMIT;
-SET AUTOCOMMIT=ON;
Performing State Transfer on a server that has been killed and restarted
connection node_1;
-CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB;
+CREATE TABLE t1 (id int not null primary key,f1 CHAR(255)) ENGINE=InnoDB;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
-INSERT INTO t1 VALUES ('node1_committed_before');
+INSERT INTO t1 VALUES (1,'node1_committed_before');
+INSERT INTO t1 VALUES (2,'node1_committed_before');
+INSERT INTO t1 VALUES (3,'node1_committed_before');
+INSERT INTO t1 VALUES (4,'node1_committed_before');
+INSERT INTO t1 VALUES (5,'node1_committed_before');
COMMIT;
connection node_2;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
-INSERT INTO t1 VALUES ('node2_committed_before');
+INSERT INTO t1 VALUES (6,'node2_committed_before');
+INSERT INTO t1 VALUES (7,'node2_committed_before');
+INSERT INTO t1 VALUES (8,'node2_committed_before');
+INSERT INTO t1 VALUES (9,'node2_committed_before');
+INSERT INTO t1 VALUES (10,'node2_committed_before');
COMMIT;
Killing server ...
connection node_1;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
-INSERT INTO t1 VALUES ('node1_committed_during');
+INSERT INTO t1 VALUES (11,'node1_committed_during');
+INSERT INTO t1 VALUES (12,'node1_committed_during');
+INSERT INTO t1 VALUES (13,'node1_committed_during');
+INSERT INTO t1 VALUES (14,'node1_committed_during');
+INSERT INTO t1 VALUES (15,'node1_committed_during');
COMMIT;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (16,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (17,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (18,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (19,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (20,'node1_to_be_committed_after');
connect node_1a_galera_st_kill_slave, 127.0.0.1, root, , test, $NODE_MYPORT_1;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (21,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (22,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (23,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (24,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (25,'node1_to_be_rollbacked_after');
connection node_2;
Performing --wsrep-recover ...
Starting server ...
Using --wsrep-start-position when starting mysqld ...
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
-INSERT INTO t1 VALUES ('node2_committed_after');
+INSERT INTO t1 VALUES (26,'node2_committed_after');
+INSERT INTO t1 VALUES (27,'node2_committed_after');
+INSERT INTO t1 VALUES (28,'node2_committed_after');
+INSERT INTO t1 VALUES (29,'node2_committed_after');
+INSERT INTO t1 VALUES (30,'node2_committed_after');
COMMIT;
connection node_1;
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
-INSERT INTO t1 VALUES ('node1_to_be_committed_after');
+INSERT INTO t1 VALUES (31,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (32,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (33,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (34,'node1_to_be_committed_after');
+INSERT INTO t1 VALUES (35,'node1_to_be_committed_after');
COMMIT;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
-INSERT INTO t1 VALUES ('node1_committed_after');
+INSERT INTO t1 VALUES (36,'node1_committed_after');
+INSERT INTO t1 VALUES (37,'node1_committed_after');
+INSERT INTO t1 VALUES (38,'node1_committed_after');
+INSERT INTO t1 VALUES (39,'node1_committed_after');
+INSERT INTO t1 VALUES (40,'node1_committed_after');
COMMIT;
connection node_1a_galera_st_kill_slave;
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
-INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (41,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (42,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (43,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (45,'node1_to_be_rollbacked_after');
+INSERT INTO t1 VALUES (46,'node1_to_be_rollbacked_after');
ROLLBACK;
-SELECT COUNT(*) = 35 FROM t1;
-COUNT(*) = 35
-1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_35 FROM t1;
+EXPECT_35
+35
+SELECT * FROM t1;
+id f1
+1 node1_committed_before
+2 node1_committed_before
+3 node1_committed_before
+4 node1_committed_before
+5 node1_committed_before
+6 node2_committed_before
+7 node2_committed_before
+8 node2_committed_before
+9 node2_committed_before
+10 node2_committed_before
+11 node1_committed_during
+12 node1_committed_during
+13 node1_committed_during
+14 node1_committed_during
+15 node1_committed_during
+16 node1_to_be_committed_after
+17 node1_to_be_committed_after
+18 node1_to_be_committed_after
+19 node1_to_be_committed_after
+20 node1_to_be_committed_after
+26 node2_committed_after
+27 node2_committed_after
+28 node2_committed_after
+29 node2_committed_after
+30 node2_committed_after
+31 node1_to_be_committed_after
+32 node1_to_be_committed_after
+33 node1_to_be_committed_after
+34 node1_to_be_committed_after
+35 node1_to_be_committed_after
+36 node1_committed_after
+37 node1_committed_after
+38 node1_committed_after
+39 node1_committed_after
+40 node1_committed_after
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
COUNT(*) = 0
1
COMMIT;
-SET AUTOCOMMIT=ON;
connection node_1;
-SELECT COUNT(*) = 35 FROM t1;
-COUNT(*) = 35
-1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_sync_wait=15;
+SELECT COUNT(*) AS EXPECT_35 FROM t1;
+EXPECT_35
+35
+SELECT * FROM t1;
+id f1
+1 node1_committed_before
+2 node1_committed_before
+3 node1_committed_before
+4 node1_committed_before
+5 node1_committed_before
+6 node2_committed_before
+7 node2_committed_before
+8 node2_committed_before
+9 node2_committed_before
+10 node2_committed_before
+11 node1_committed_during
+12 node1_committed_during
+13 node1_committed_during
+14 node1_committed_during
+15 node1_committed_during
+16 node1_to_be_committed_after
+17 node1_to_be_committed_after
+18 node1_to_be_committed_after
+19 node1_to_be_committed_after
+20 node1_to_be_committed_after
+26 node2_committed_after
+27 node2_committed_after
+28 node2_committed_after
+29 node2_committed_after
+30 node2_committed_after
+31 node1_to_be_committed_after
+32 node1_to_be_committed_after
+33 node1_to_be_committed_after
+34 node1_to_be_committed_after
+35 node1_to_be_committed_after
+36 node1_committed_after
+37 node1_committed_after
+38 node1_committed_after
+39 node1_committed_after
+40 node1_committed_after
SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1;
COUNT(*) = 0
1
DROP TABLE t1;
COMMIT;
-SET AUTOCOMMIT=ON;
connection node_2;
connection node_1;
CALL mtr.add_suppression("Slave SQL: Error 'The MySQL server is running with the --skip-grant-tables option so it cannot execute this statement' on query");
diff --git a/mysql-test/suite/galera/t/MDEV-16509.test b/mysql-test/suite/galera/t/MDEV-16509.test
index a17d7899939..ea4430d5e3f 100644
--- a/mysql-test/suite/galera/t/MDEV-16509.test
+++ b/mysql-test/suite/galera/t/MDEV-16509.test
@@ -5,7 +5,7 @@
--source include/galera_cluster.inc
--source include/have_debug.inc
--source include/have_debug_sync.inc
-
+--source include/galera_have_debug_sync.inc
CREATE TABLE t1 (f1 INT PRIMARY KEY) ENGINE=InnoDB;
diff --git a/mysql-test/suite/galera/t/MENT-1047.test b/mysql-test/suite/galera/t/MENT-1047.test
new file mode 100644
index 00000000000..1431818f806
--- /dev/null
+++ b/mysql-test/suite/galera/t/MENT-1047.test
@@ -0,0 +1,7 @@
+#
+# MENT-1047 - Reject XA with Galera replication
+#
+--source include/galera_cluster.inc
+
+--error ER_NOT_SUPPORTED_YET
+XA START 'trx';
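For reference, a minimal client-side sketch of what the new MENT-1047 test asserts, assuming a session on any node with Galera replication active: the XA statement is refused up front with ER_NOT_SUPPORTED_YET rather than being accepted.

    -- illustrative session, not part of the patch
    XA START 'trx';
    -- expected: ERROR ER_NOT_SUPPORTED_YET (XA is rejected with Galera replication)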
diff --git a/mysql-test/suite/galera/t/galera#500.test b/mysql-test/suite/galera/t/galera#500.test
index 60f303b7103..471620b32c1 100644
--- a/mysql-test/suite/galera/t/galera#500.test
+++ b/mysql-test/suite/galera/t/galera#500.test
@@ -8,9 +8,10 @@
--source include/galera_cluster.inc
--source include/galera_have_debug_sync.inc
+# Save original auto_increment_offset values.
--let $node_1=node_1
--let $node_2=node_2
---source suite/galera/include/auto_increment_offset_save.inc
+--source include/auto_increment_offset_save.inc
# Force node_2 gcomm background thread to terminate via exception.
--connection node_2
@@ -41,4 +42,4 @@ SET SESSION wsrep_on=0;
--connection node_2
CALL mtr.add_suppression("WSREP: exception from gcomm, backend must be restarted: Gcomm backend termination was requested by setting gmcast.isolate=2.");
---source suite/galera/include/auto_increment_offset_restore.inc
+--source include/auto_increment_offset_restore.inc
diff --git a/mysql-test/suite/galera/t/galera_UK_conflict.test b/mysql-test/suite/galera/t/galera_UK_conflict.test
new file mode 100644
index 00000000000..9978ba9b8bf
--- /dev/null
+++ b/mysql-test/suite/galera/t/galera_UK_conflict.test
@@ -0,0 +1,276 @@
+#
+# This test exercises transaction replay in a scenario where two subsequent
+# write sets being applied conflict with a local transaction in the commit
+# phase. The conflict is a "false positive" conflict on a GAP lock in a
+# secondary unique index.
+# The first applier causes a BF abort of the local committer, which
+# starts replaying because of positive certification.
+# In the buggy version the scenario continues so that, while the local
+# transaction is replaying, the latter applier hits a similar UK GAP lock
+# conflict and forces the replayer to abort a second time.
+# In the fixed version this latter replayer BF abort should not happen.
+#
+
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+--source include/have_debug_sync.inc
+--source include/galera_have_debug_sync.inc
+
+
+--let $expected_wsrep_local_replays = `SELECT VARIABLE_VALUE+1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_replays'`
+
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 int, f3 int, unique key keyj (f2));
+INSERT INTO t1 VALUES (1, 1, 0);
+INSERT INTO t1 VALUES (3, 3, 0);
+INSERT INTO t1 VALUES (10, 10, 0);
+
+# we will need 2 applier threads for applying two write sets in parallel in node_1
+# and 1 applier thread for handling replaying
+SET GLOBAL wsrep_slave_threads = 3;
+SET GLOBAL DEBUG_DBUG = "d,sync.wsrep_apply_cb";
+
+--connection node_1
+# start a transaction which deletes and re-inserts the middle row of the test table;
+# it will be the victim of a false positive conflict with the appliers
+SET SESSION wsrep_sync_wait=0;
+START TRANSACTION;
+
+DELETE FROM t1 WHERE f2 = 3;
+INSERT INTO t1 VALUES (3, 3, 1);
+
+# Control connection to manage sync points for appliers
+--connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1
+--connection node_1a
+SET SESSION wsrep_sync_wait=0;
+
+# first send an INSERT transaction from node 2; it will conflict on a GAP lock in node 1
+--connection node_2
+INSERT INTO t1 VALUES (5, 5, 2);
+
+--connection node_1a
+# wait to see the INSERT in apply_cb sync point
+SET SESSION DEBUG_SYNC = "now WAIT_FOR sync.wsrep_apply_cb_reached";
+
+# first applier seen in wait point, set sync point for the second INSERT
+--let $galera_sync_point = apply_monitor_slave_enter_sync
+--source include/galera_set_sync_point.inc
+
+--connection node_2
+# send second insert into same GAP in test table
+INSERT INTO t1 VALUES (4, 4, 2);
+
+--connection node_1a
+# wait for the second insert to arrive in its sync point
+--let $galera_sync_point = apply_monitor_slave_enter_sync
+--source include/galera_wait_sync_point.inc
+--source include/galera_clear_sync_point.inc
+
+# both appliers are now waiting in separate sync points
+
+# Block the local commit, send the COMMIT and wait until it gets blocked
+--let $galera_sync_point = commit_monitor_master_enter_sync
+--source include/galera_set_sync_point.inc
+
+--connection node_1
+--send COMMIT
+
+--connection node_1a
+# wait for the local commit to enter the commit monitor wait state
+--let $galera_sync_point = apply_monitor_slave_enter_sync commit_monitor_master_enter_sync
+--source include/galera_wait_sync_point.inc
+--source include/galera_clear_sync_point.inc
+
+# release the local transaction to continue with commit
+--let $galera_sync_point = commit_monitor_master_enter_sync
+--source include/galera_signal_sync_point.inc
+--source include/galera_clear_sync_point.inc
+
+# and now release the first applier; it should force the local trx to abort
+SET GLOBAL DEBUG_DBUG = "";
+SET DEBUG_SYNC = "now SIGNAL signal.wsrep_apply_cb";
+SET GLOBAL debug_dbug = NULL;
+SET debug_sync='RESET';
+
+# wait for BF abort to happen and replaying begin
+--let $wait_condition = SELECT VARIABLE_VALUE= $expected_wsrep_local_replays FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_replays';
+--let $wait_condition_on_error_output= SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_replays';
+--source include/wait_condition_with_debug.inc
+
+# set another sync point for second applier
+SET GLOBAL DEBUG_DBUG = "d,sync.wsrep_apply_cb";
+
+# let the second applier move forward
+--let $galera_sync_point = apply_monitor_slave_enter_sync
+--source include/galera_signal_sync_point.inc
+
+# waiting until second applier is in wait state
+SET SESSION DEBUG_SYNC = "now WAIT_FOR sync.wsrep_apply_cb_reached";
+
+# stopping second applier before commit
+--let $galera_sync_point = commit_monitor_slave_enter_sync
+--source include/galera_set_sync_point.inc
+--source include/galera_clear_sync_point.inc
+
+# release the second insert; with the buggy version it will conflict with
+# the replayer
+SET GLOBAL DEBUG_DBUG = "";
+SET DEBUG_SYNC = "now SIGNAL signal.wsrep_apply_cb";
+SET GLOBAL debug_dbug = NULL;
+SET debug_sync='RESET';
+
+# with the fixed version, the second applier has reached the commit monitor,
+# and we can release it to complete
+--let $galera_sync_point = commit_monitor_slave_enter_sync
+--source include/galera_signal_sync_point.inc
+--source include/galera_clear_sync_point.inc
+
+# local commit should succeed
+--connection node_1
+--reap
+
+SELECT * FROM t1;
+
+# returning original slave thread count
+SET GLOBAL wsrep_slave_threads = DEFAULT;
+
+--connection node_2
+SELECT * FROM t1;
+
+# replicate some transactions, so that wsrep slave thread count can reach
+# original state in node 1
+INSERT INTO t1 VALUES (7,7,7);
+INSERT INTO t1 VALUES (8,8,8);
+
+DROP TABLE t1;
+
+##################################################################################
+# test scenario 2
+#
+# commit order is now: INSERT-1, local COMMIT, INSERT-2
+# while local trx is replaying, the latter applier has applied and is waiting
+# for commit.
+# The point in this scenario is to verify that replayer does not try to abort
+# the latter applier
+#################################################################################
+
+--echo test scenario 2
+
+--connection node_1
+--let $expected_wsrep_local_replays = `SELECT VARIABLE_VALUE+1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_replays'`
+
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 int, f3 int, unique key keyj (f2));
+INSERT INTO t1 VALUES (1, 1, 0);
+INSERT INTO t1 VALUES (3, 3, 0);
+INSERT INTO t1 VALUES (10, 10, 0);
+
+# we will need 2 applier threads for applying two write sets in parallel in node_1
+# and 1 applier thread for handling replaying
+SET GLOBAL wsrep_slave_threads = 3;
+
+# set sync point for the first INSERT applier
+--let $galera_sync_point = apply_monitor_slave_enter_sync
+--source include/galera_set_sync_point.inc
+
+--connection node_1
+# start a transaction which deletes and re-inserts the middle row of the test table;
+# it will be the victim of a false positive conflict with the appliers
+SET SESSION wsrep_sync_wait=0;
+START TRANSACTION;
+
+DELETE FROM t1 WHERE f2 = 3;
+INSERT INTO t1 VALUES (3, 3, 1);
+
+# Control connection to manage sync points for appliers
+--connection node_1a
+SET SESSION wsrep_sync_wait=0;
+
+# first send an INSERT transaction from node 2; it will conflict on a GAP lock in node 1
+--connection node_2
+INSERT INTO t1 VALUES (5, 5, 2);
+
+--connection node_1a
+# wait to see the INSERT in apply_cb sync point
+--let $galera_sync_point = apply_monitor_slave_enter_sync
+--source include/galera_wait_sync_point.inc
+--source include/galera_clear_sync_point.inc
+
+# Block the local commit, send the COMMIT and wait until it gets blocked
+--let $galera_sync_point = commit_monitor_master_enter_sync
+--source include/galera_set_sync_point.inc
+
+--connection node_1
+--send COMMIT
+
+--connection node_1a
+# wait for the local commit to enter the commit monitor wait state
+--let $galera_sync_point = apply_monitor_slave_enter_sync commit_monitor_master_enter_sync
+--source include/galera_wait_sync_point.inc
+--source include/galera_clear_sync_point.inc
+
+# set sync point before replaying
+SET GLOBAL DEBUG_DBUG = "d,sync.wsrep_replay_cb";
+
+# release the local transaction to continue with commit
+# it should advance and end up waiting in the commit monitor for its turn
+--let $galera_sync_point = commit_monitor_master_enter_sync
+--source include/galera_signal_sync_point.inc
+--source include/galera_clear_sync_point.inc
+
+# and now release the first applier; it should force the local trx to abort
+--let $galera_sync_point = apply_monitor_slave_enter_sync
+--source include/galera_signal_sync_point.inc
+--source include/galera_clear_sync_point.inc
+
+# wait for BF abort to happen and replaying begin
+--let $wait_condition = SELECT VARIABLE_VALUE= $expected_wsrep_local_replays FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_replays';
+--let $wait_condition_on_error_output= SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_replays';
+--source include/wait_condition_with_debug.inc
+
+# the replayer should now be stopped in the sync point
+SET SESSION DEBUG_SYNC = "now WAIT_FOR sync.wsrep_replay_cb_reached";
+
+# set sync point for the second INSERT
+--let $galera_sync_point = commit_monitor_slave_enter_sync
+--source include/galera_set_sync_point.inc
+
+--connection node_2
+# send second insert into same GAP in test table
+INSERT INTO t1 VALUES (4, 4, 2);
+
+--connection node_1a
+# wait for the second applier to enter in commit monitor wait state
+--let $galera_sync_point = commit_monitor_slave_enter_sync
+--source include/galera_wait_sync_point.inc
+--source include/galera_clear_sync_point.inc
+
+# and, letting the second applier move forward, it will stop naturally
+# to wait for commit order after the replayer's commit
+--let $galera_sync_point = commit_monitor_slave_enter_sync
+--source include/galera_signal_sync_point.inc
+--source include/galera_clear_sync_point.inc
+
+# and now release the replayer; if all is good, it will commit before the second applier
+SET GLOBAL DEBUG_DBUG = "";
+SET DEBUG_SYNC = "now SIGNAL signal.wsrep_replay_cb";
+SET GLOBAL debug_dbug = NULL;
+SET debug_sync='RESET';
+
+# local commit should succeed
+--connection node_1
+--reap
+
+--let $wait_condition = SELECT COUNT(*)=5 FROM t1;
+--source include/wait_condition.inc
+
+# returning original slave thread count
+SET GLOBAL wsrep_slave_threads = DEFAULT;
+
+--connection node_2
+SELECT * FROM t1;
+
+# replicate some transactions, so that wsrep slave thread count can reach
+# original state in node 1
+INSERT INTO t1 VALUES (7,7,7);
+INSERT INTO t1 VALUES (8,8,8);
+
+DROP TABLE t1;
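As a side note, both scenarios verify that a replay actually took place by polling the wsrep_local_replays status counter, captured before the conflict and expected to grow by exactly one. A minimal sketch of that check from a plain client session (illustrative only, not part of the patch):

    -- snapshot the counter before provoking the conflict
    SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS
    WHERE VARIABLE_NAME = 'wsrep_local_replays';
    -- ... run the conflicting workload ...
    -- afterwards the counter should have increased by 1
    SHOW GLOBAL STATUS LIKE 'wsrep_local_replays';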
diff --git a/mysql-test/suite/galera/t/galera_as_slave_replay.test b/mysql-test/suite/galera/t/galera_as_slave_replay.test
index 93f95349e6d..47f70bda721 100644
--- a/mysql-test/suite/galera/t/galera_as_slave_replay.test
+++ b/mysql-test/suite/galera/t/galera_as_slave_replay.test
@@ -18,9 +18,10 @@
#--source suite/galera/include/galera_have_debug_sync.inc
#
-# node 1 is native MariaDB server operating as async replication master
+# node 3 is native MariaDB server operating as async replication master
#
---connection node_1
+--connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3
+--connection node_3
RESET MASTER;
--connection node_2a
@@ -31,14 +32,14 @@ RESET MASTER;
#
-# nodes 2 and 3 form a galera cluster, node 2 operates as slave for native MariaDB naster in node 1
+# nodes 1 and 2 form a galera cluster, node 2 operates as slave for native MariaDB master in node 3
#
--disable_query_log
---eval CHANGE MASTER TO MASTER_HOST='127.0.0.1', MASTER_USER='root', MASTER_PORT=$NODE_MYPORT_1;
+--eval CHANGE MASTER TO MASTER_HOST='127.0.0.1', MASTER_USER='root', MASTER_PORT=$NODE_MYPORT_3;
--enable_query_log
START SLAVE;
---connection node_1
+--connection node_3
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 CHAR(1)) engine=innodb;
INSERT INTO t1 VALUES (1, 'a');
INSERT INTO t1 VALUES (3, 'a');
@@ -63,15 +64,14 @@ SET SESSION wsrep_sync_wait = 0;
--source include/wait_condition.inc
# wait for create table and inserts to be replicated in cluster
---connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3
---connection node_3
+--connection node_1
SET SESSION wsrep_sync_wait = 0;
--let $wait_condition = SELECT COUNT(*) = 2 FROM test.t1;
--source include/wait_condition.inc
--connection node_2a
# Block the future commit of async replication
---let $galera_sync_point = commit_monitor_enter_sync
+--let $galera_sync_point = commit_monitor_master_enter_sync
--source include/galera_set_sync_point.inc
# block also the applier before applying begins
@@ -81,13 +81,13 @@ SET GLOBAL debug_dbug = "d,sync.wsrep_apply_cb";
# now inject a conflicting insert from node 3, it will replicate with
# earlier seqno (than async transaction) and pause before applying in node 2
#
---connection node_3
+--connection node_1
INSERT INTO test.t1 VALUES (2, 'b');
#
# send the update from master, this will succeed here, beceuase of async replication.
# async replication will apply this in node 2 and pause before commit phase,
---connection node_1
+--connection node_3
--error 0
COMMIT;
@@ -108,7 +108,7 @@ SET DEBUG_SYNC = "now SIGNAL signal.wsrep_apply_cb";
--source include/galera_clear_sync_point.inc
--source include/galera_signal_sync_point.inc
---connection node_1
+--connection node_3
SELECT COUNT(*) = 1 FROM t1 WHERE f2 = 'a';
SELECT COUNT(*) = 1 FROM t1 WHERE f2 = 'c';
@@ -139,7 +139,7 @@ SET DEBUG_SYNC = "RESET";
--echo # test phase with real abort
--echo #
---connection node_1
+--connection node_3
set binlog_format=ROW;
@@ -163,11 +163,11 @@ UPDATE t1 SET f2 = 'd' WHERE f1 = 3;
SET GLOBAL debug_dbug = "d,sync.wsrep_apply_cb";
# Inject a conflicting update from node 3
---connection node_3
+--connection node_1
UPDATE test.t1 SET f2 = 'e' WHERE f1 = 3;
# send the update from master
---connection node_1
+--connection node_3
--error 0
COMMIT;
@@ -195,6 +195,6 @@ RESET SLAVE;
DROP TABLE t1;
---connection node_1
+--connection node_3
DROP TABLE t1;
RESET MASTER;
diff --git a/mysql-test/suite/galera/t/galera_bf_abort_ps.cnf b/mysql-test/suite/galera/t/galera_bf_abort_ps.cnf
new file mode 100644
index 00000000000..34c1a8cc3cf
--- /dev/null
+++ b/mysql-test/suite/galera/t/galera_bf_abort_ps.cnf
@@ -0,0 +1,3 @@
+!include ../galera_2nodes.cnf
+[mysqltest]
+ps-protocol
\ No newline at end of file
diff --git a/mysql-test/suite/galera/t/galera_bf_abort_ps.test b/mysql-test/suite/galera/t/galera_bf_abort_ps.test
new file mode 100644
index 00000000000..d2dfb92651e
--- /dev/null
+++ b/mysql-test/suite/galera/t/galera_bf_abort_ps.test
@@ -0,0 +1,34 @@
+#
+# MDEV-24255
+# Test BF abort of a transaction that has ps-protocol enabled
+#
+
+--source include/galera_cluster.inc
+
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 CHAR(6)) ENGINE=InnoDB;
+--connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2
+
+--connection node_2
+--let $wsrep_local_bf_aborts_before = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_bf_aborts'`
+
+START TRANSACTION;
+INSERT INTO t1 VALUES (1,'node_2');
+
+--connection node_1
+INSERT INTO t1 VALUES (1,'node_1');
+
+--connection node_2a
+--let $wait_condition = SELECT COUNT(*) = 1 FROM t1 WHERE f2 = 'node_1'
+--source include/wait_condition.inc
+
+--connection node_2
+--error ER_LOCK_DEADLOCK
+INSERT INTO t1 VALUES (2, 'node_2');
+
+--let $wsrep_local_bf_aborts_after = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_bf_aborts'`
+
+--disable_query_log
+--eval SELECT $wsrep_local_bf_aborts_after - $wsrep_local_bf_aborts_before = 1 AS wsrep_local_aborts_increment;
+--enable_query_log
+
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera/t/galera_bf_abort_ps_threadpool.cnf b/mysql-test/suite/galera/t/galera_bf_abort_ps_threadpool.cnf
new file mode 100644
index 00000000000..83baa995c17
--- /dev/null
+++ b/mysql-test/suite/galera/t/galera_bf_abort_ps_threadpool.cnf
@@ -0,0 +1,7 @@
+!include ../galera_2nodes.cnf
+
+[mysqld]
+thread-handling=pool-of-threads
+
+[mysqltest]
+ps-protocol
diff --git a/mysql-test/suite/galera/t/galera_bf_abort_ps_threadpool.test b/mysql-test/suite/galera/t/galera_bf_abort_ps_threadpool.test
new file mode 100644
index 00000000000..56348a6f527
--- /dev/null
+++ b/mysql-test/suite/galera/t/galera_bf_abort_ps_threadpool.test
@@ -0,0 +1,54 @@
+#
+# MDEV-24255
+# Test BF abort of a transaction that has ps-protocol enabled
+# This test stresses the case where wsrep_before_command()
+# finds the transaction in state s_must_abort. This is only
+# possible when the server is using the thread pool.
+#
+
+--source include/galera_cluster.inc
+--source include/have_debug_sync.inc
+
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 CHAR(6)) ENGINE=InnoDB;
+
+--connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2
+
+--connection node_2
+--let $wsrep_local_bf_aborts_before = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_bf_aborts'`
+
+START TRANSACTION;
+INSERT INTO t1 VALUES (1,'node_2');
+
+--connection node_2a
+SET GLOBAL debug_dbug = "+d,sync.wsrep_apply_cb";
+
+--connection node_1
+INSERT INTO t1 VALUES (1,'node_1');
+
+--connection node_2a
+SET DEBUG_SYNC = "now WAIT_FOR sync.wsrep_apply_cb_reached";
+
+--connection node_2
+SET DEBUG_SYNC = "wsrep_before_before_command SIGNAL signal.wsrep_apply_cb WAIT_FOR bf_abort";
+
+#
+# The following INSERT is expected to enter
+# wsrep_before_command() and find its transaction
+# in state s_must_abort.
+# Notice that the test appears more complicated
+# than it needs to be; however, we cannot use
+# --send for this INSERT, otherwise mysqltest
+# will not use the ps-protocol
+#
+--error ER_LOCK_DEADLOCK
+INSERT INTO t1 VALUES (2, 'node_2');
+
+--let $wsrep_local_bf_aborts_after = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_bf_aborts'`
+
+--disable_query_log
+--eval SELECT $wsrep_local_bf_aborts_after - $wsrep_local_bf_aborts_before = 1 AS wsrep_local_aborts_increment;
+--enable_query_log
+
+SET DEBUG_SYNC = 'RESET';
+SET GLOBAL debug_dbug = DEFAULT;
+DROP TABLE t1;
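For orientation, in both new ps-protocol tests the BF abort surfaces to the client as a deadlock error on the blocked statement, and the tests confirm it by comparing the wsrep_local_bf_aborts counter before and after. A hedged sketch of the observable behaviour (illustrative only, not part of the patch):

    INSERT INTO t1 VALUES (2, 'node_2');
    -- expected: ERROR ER_LOCK_DEADLOCK (1213), the transaction was BF-aborted
    SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS
    WHERE VARIABLE_NAME = 'wsrep_local_bf_aborts';
    -- expected: one higher than before the aborted statement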
diff --git a/mysql-test/suite/galera/t/galera_bf_kill_debug.test b/mysql-test/suite/galera/t/galera_bf_kill_debug.test
index b687a5a6a67..c322f283757 100644
--- a/mysql-test/suite/galera/t/galera_bf_kill_debug.test
+++ b/mysql-test/suite/galera/t/galera_bf_kill_debug.test
@@ -84,7 +84,7 @@ SET DEBUG_SYNC = "now SIGNAL continue_kill";
--reap
--connection node_2a
---error 0,1213
+--error 0,1213,2013
select * from t1;
--connection node_2
diff --git a/mysql-test/suite/galera/t/galera_binlog_stmt_autoinc.cnf b/mysql-test/suite/galera/t/galera_binlog_stmt_autoinc.cnf
new file mode 100644
index 00000000000..889c81b4a0a
--- /dev/null
+++ b/mysql-test/suite/galera/t/galera_binlog_stmt_autoinc.cnf
@@ -0,0 +1,7 @@
+!include ../galera_2nodes.cnf
+
+[mysqld.1]
+auto_increment_offset=1
+
+[mysqld.2]
+auto_increment_offset=2
diff --git a/mysql-test/suite/galera/t/galera_ctas.test b/mysql-test/suite/galera/t/galera_ctas.test
new file mode 100644
index 00000000000..8b9ad9c4a20
--- /dev/null
+++ b/mysql-test/suite/galera/t/galera_ctas.test
@@ -0,0 +1,39 @@
+--source include/galera_cluster.inc
+
+--connection node_1
+create table t1_Aria(a int, count int, b int, key(b)) engine=Aria;
+INSERT INTO t1_Aria values (1,1,1);
+create table t1_MyISAM(a int, count int, b int, key(b)) engine=MyISAM;
+INSERT INTO t1_MyISAM values (1,1,1);
+create table t1_InnoDB(a int, count int, b int, key(b)) engine=InnoDB;
+INSERT INTO t1_InnoDB values (1,1,1);
+
+SET SESSION default_storage_engine=MyISAM;
+CREATE TABLE t2 AS SELECT * FROM t1_Aria;
+CREATE TABLE t3 AS SELECT * FROM t1_MyISAM;
+CREATE TABLE t4 AS SELECT * FROM t1_InnoDB;
+SHOW CREATE TABLE t2;
+SHOW CREATE TABLE t3;
+SHOW CREATE TABLE t4;
+DROP TABLE t2, t3,t4;
+
+SET SESSION default_storage_engine=Aria;
+CREATE TABLE t2 AS SELECT * FROM t1_Aria;
+CREATE TABLE t3 AS SELECT * FROM t1_MyISAM;
+CREATE TABLE t4 AS SELECT * FROM t1_InnoDB;
+SHOW CREATE TABLE t2;
+SHOW CREATE TABLE t3;
+SHOW CREATE TABLE t4;
+DROP TABLE t2, t3,t4;
+
+SET SESSION default_storage_engine=InnoDB;
+CREATE TABLE t2 AS SELECT * FROM t1_Aria;
+CREATE TABLE t3 AS SELECT * FROM t1_MyISAM;
+CREATE TABLE t4 AS SELECT * FROM t1_InnoDB;
+SHOW CREATE TABLE t2;
+SHOW CREATE TABLE t3;
+SHOW CREATE TABLE t4;
+
+DROP TABLE t2, t3,t4;
+DROP TABLE t1_MyISAM, t1_Aria,t1_InnoDB;
+
diff --git a/mysql-test/suite/galera/t/galera_fk_cascade_delete.test b/mysql-test/suite/galera/t/galera_fk_cascade_delete.test
index a3e0dbcf36f..49b54f0f7f0 100644
--- a/mysql-test/suite/galera/t/galera_fk_cascade_delete.test
+++ b/mysql-test/suite/galera/t/galera_fk_cascade_delete.test
@@ -9,7 +9,6 @@
--echo #
--connection node_1
-set wsrep_sync_wait=0;
CREATE TABLE grandparent (
id INT NOT NULL PRIMARY KEY
@@ -36,8 +35,12 @@ INSERT INTO parent VALUES (1,1), (2,2);
INSERT INTO child VALUES (1,1), (2,2);
--connection node_2
-set wsrep_sync_wait=0;
-
+--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 'grandparent'
+--source include/wait_condition.inc
+--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 'parent'
+--source include/wait_condition.inc
+--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 'child'
+--source include/wait_condition.inc
--let $wait_condition = SELECT COUNT(*) = 2 FROM child;
--source include/wait_condition.inc
--let $wait_condition = SELECT COUNT(*) = 2 FROM parent;
@@ -46,6 +49,10 @@ set wsrep_sync_wait=0;
--source include/wait_condition.inc
DELETE FROM grandparent WHERE id = 1;
+SELECT * FROM grandparent;
+SELECT * FROM parent;
+SELECT * FROM child;
+
--connection node_1
--let $wait_condition = SELECT COUNT(*) = 1 FROM child;
--source include/wait_condition.inc
@@ -53,8 +60,10 @@ DELETE FROM grandparent WHERE id = 1;
--source include/wait_condition.inc
--let $wait_condition = SELECT COUNT(*) = 1 FROM grandparent;
--source include/wait_condition.inc
-SELECT COUNT(*), COUNT(*) = 0 FROM parent WHERE grandparent_id = 1;
-SELECT COUNT(*), COUNT(*) = 0 FROM child WHERE parent_id = 1;
+
+SELECT * FROM grandparent;
+SELECT * FROM parent;
+SELECT * FROM child;
DROP TABLE child;
DROP TABLE parent;
diff --git a/mysql-test/suite/galera/t/galera_fk_cascade_delete_debug.test b/mysql-test/suite/galera/t/galera_fk_cascade_delete_debug.test
index 96e633f83d7..d902783ed64 100644
--- a/mysql-test/suite/galera/t/galera_fk_cascade_delete_debug.test
+++ b/mysql-test/suite/galera/t/galera_fk_cascade_delete_debug.test
@@ -25,16 +25,25 @@ INSERT INTO parent VALUES ('row one'), ('row two');
INSERT INTO child VALUES (1,'row one'), (2,'row two');
--connection node_2
+--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 'parent'
+--source include/wait_condition.inc
+--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 'child'
+--source include/wait_condition.inc
+--let $wait_condition = SELECT COUNT(*) = 2 FROM parent;
+--source include/wait_condition.inc
--let $wait_condition = SELECT COUNT(*) = 2 FROM child;
--source include/wait_condition.inc
+
DELETE FROM parent;
--connection node_1
+--let $wait_condition = SELECT COUNT(*) = 0 FROM parent;
+--source include/wait_condition.inc
--let $wait_condition = SELECT COUNT(*) = 0 FROM child;
--source include/wait_condition.inc
-SELECT COUNT(*), COUNT(*) = 0 FROM parent;
-SELECT COUNT(*), COUNT(*) = 0 FROM child;
+SELECT * FROM parent;
+SELECT * FROM child;
DROP TABLE child;
DROP TABLE parent;
@@ -62,6 +71,12 @@ INSERT INTO parent VALUES (1);
INSERT INTO child VALUES (1,0,1);
--connection node_2
+--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 'parent'
+--source include/wait_condition.inc
+--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 'child'
+--source include/wait_condition.inc
+--let $wait_condition = SELECT COUNT(*) = 1 FROM parent;
+--source include/wait_condition.inc
--let $wait_condition = SELECT COUNT(*) = 1 FROM child;
--source include/wait_condition.inc
@@ -89,11 +104,13 @@ SET DEBUG_SYNC = "RESET";
--connection node_1
--reap
-
+--let $wait_condition = SELECT COUNT(*) = 0 FROM parent;
+--source include/wait_condition.inc
--let $wait_condition = SELECT COUNT(*) = 0 FROM child;
--source include/wait_condition.inc
-SELECT COUNT(*), COUNT(*) = 0 FROM parent;
-SELECT COUNT(*), COUNT(*) = 0 FROM child;
+
+SELECT * FROM parent;
+SELECT * FROM child;
DROP TABLE child;
DROP TABLE parent;
diff --git a/mysql-test/suite/galera/t/galera_fulltext.test b/mysql-test/suite/galera/t/galera_fulltext.test
index a90cab1aa1a..19aa4f7a0a0 100644
--- a/mysql-test/suite/galera/t/galera_fulltext.test
+++ b/mysql-test/suite/galera/t/galera_fulltext.test
@@ -60,3 +60,26 @@ SELECT COUNT(f1) = 1000 FROM t1 WHERE MATCH(f1) AGAINST ('abcdefjhk');
DROP TABLE t1;
DROP TABLE ten;
+#
+# MDEV-24978 : SIGABRT in __libc_message
+#
+--connection node_1
+SET @value=REPEAT (1,5001);
+CREATE TABLE t (a VARCHAR(5000),FULLTEXT (a)) engine=innodb;
+INSERT IGNORE INTO t VALUES(@value);
+SELECT COUNT(*) FROM t;
+
+--connection node_2
+SELECT COUNT(*) FROM t;
+
+--connection node_1
+DROP TABLE t;
+CREATE TABLE t (a VARCHAR(5000)) engine=innodb;
+INSERT IGNORE INTO t VALUES(@value);
+SELECT COUNT(*) FROM t;
+
+--connection node_2
+SELECT COUNT(*) FROM t;
+
+--connection node_1
+DROP TABLE t;
diff --git a/mysql-test/suite/galera/t/galera_gcache_recover.cnf b/mysql-test/suite/galera/t/galera_gcache_recover.cnf
index c7b59b6a27e..34c757de77e 100644
--- a/mysql-test/suite/galera/t/galera_gcache_recover.cnf
+++ b/mysql-test/suite/galera/t/galera_gcache_recover.cnf
@@ -4,4 +4,4 @@
wsrep_provider_options='base_port=@mysqld.1.#galera_port;gcache.recover=yes;pc.ignore_sb=true'
[mysqld.2]
-wsrep_provider_options='base_port=@mysqld.2.#galera_port;gcache.recover=yes;pc.ignore_sb=true'
+wsrep_provider_options='base_port=@mysqld.2.#galera_port;gcache.recover=yes'
diff --git a/mysql-test/suite/galera/t/galera_ist_restart_joiner.test b/mysql-test/suite/galera/t/galera_ist_restart_joiner.test
index 42f210170bc..f56d0e657bd 100644
--- a/mysql-test/suite/galera/t/galera_ist_restart_joiner.test
+++ b/mysql-test/suite/galera/t/galera_ist_restart_joiner.test
@@ -21,7 +21,8 @@ INSERT INTO t1 VALUES (1, 'a'), (2, 'a'), (3, 'a'), (4, 'a'), (5, 'a'),(6, 'a');
# Disconnect node #2
--connection node_2
---source suite/galera/include/galera_unload_provider.inc
+SET SESSION wsrep_sync_wait=0;
+--source suite/galera/include/galera_stop_replication.inc
--connection node_1
UPDATE t1 SET f2 = 'b' WHERE f1 > 1;
@@ -43,7 +44,6 @@ UPDATE t1 SET f2 = 'c' WHERE f1 > 2;
# ... and restart provider to force IST
--echo Loading wsrep_provider ...
--disable_query_log
---eval SET GLOBAL wsrep_provider = '$wsrep_provider_orig';
# Make sure IST will block ...
--let $galera_sync_point = recv_IST_after_apply_trx
--source include/galera_set_sync_point.inc
diff --git a/mysql-test/suite/galera/t/galera_log_bin.inc b/mysql-test/suite/galera/t/galera_log_bin.inc
new file mode 100644
index 00000000000..cc78367b510
--- /dev/null
+++ b/mysql-test/suite/galera/t/galera_log_bin.inc
@@ -0,0 +1,46 @@
+--source include/galera_cluster.inc
+--source include/force_restart.inc
+
+--connection node_1
+set global wsrep_on=OFF;
+reset master;
+set global wsrep_on=ON;
+--connection node_2
+set global wsrep_on=OFF;
+reset master;
+set global wsrep_on=ON;
+
+#
+# Test Galera with --log-bin --log-slave-updates .
+# This way the actual MySQL binary log is used,
+# rather than Galera's own implementation
+#
+
+CREATE TABLE t1 (id INT PRIMARY KEY) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1);
+
+CREATE TABLE t2 (id INT) ENGINE=InnoDB;
+INSERT INTO t2 VALUES (1);
+INSERT INTO t2 VALUES (1);
+
+--connection node_2
+SELECT COUNT(*) = 1 FROM t1;
+SELECT COUNT(*) = 2 FROM t2;
+
+--connection node_1
+ALTER TABLE t1 ADD COLUMN f2 INTEGER;
+--let $MASTER_MYPORT=$NODE_MYPORT_1
+--source include/show_binlog_events.inc
+
+--connection node_2
+SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1';
+--let $MASTER_MYPORT=$NODE_MYPORT_2
+--source include/show_binlog_events.inc
+
+DROP TABLE t1;
+DROP TABLE t2;
+
+--echo #cleanup
+--connection node_1
+SET GLOBAL wsrep_on=OFF;
+RESET MASTER;
diff --git a/mysql-test/suite/galera/t/galera_log_bin.test b/mysql-test/suite/galera/t/galera_log_bin.test
index cc78367b510..923bd623a8a 100644
--- a/mysql-test/suite/galera/t/galera_log_bin.test
+++ b/mysql-test/suite/galera/t/galera_log_bin.test
@@ -1,46 +1 @@
---source include/galera_cluster.inc
---source include/force_restart.inc
-
---connection node_1
-set global wsrep_on=OFF;
-reset master;
-set global wsrep_on=ON;
---connection node_2
-set global wsrep_on=OFF;
-reset master;
-set global wsrep_on=ON;
-
-#
-# Test Galera with --log-bin --log-slave-updates .
-# This way the actual MySQL binary log is used,
-# rather than Galera's own implementation
-#
-
-CREATE TABLE t1 (id INT PRIMARY KEY) ENGINE=InnoDB;
-INSERT INTO t1 VALUES (1);
-
-CREATE TABLE t2 (id INT) ENGINE=InnoDB;
-INSERT INTO t2 VALUES (1);
-INSERT INTO t2 VALUES (1);
-
---connection node_2
-SELECT COUNT(*) = 1 FROM t1;
-SELECT COUNT(*) = 2 FROM t2;
-
---connection node_1
-ALTER TABLE t1 ADD COLUMN f2 INTEGER;
---let $MASTER_MYPORT=$NODE_MYPORT_1
---source include/show_binlog_events.inc
-
---connection node_2
-SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1';
---let $MASTER_MYPORT=$NODE_MYPORT_2
---source include/show_binlog_events.inc
-
-DROP TABLE t1;
-DROP TABLE t2;
-
---echo #cleanup
---connection node_1
-SET GLOBAL wsrep_on=OFF;
-RESET MASTER;
+--source galera_log_bin.inc
diff --git a/mysql-test/suite/galera/t/galera_log_bin_opt-master.opt b/mysql-test/suite/galera/t/galera_log_bin_opt-master.opt
new file mode 100644
index 00000000000..03fcb5d040d
--- /dev/null
+++ b/mysql-test/suite/galera/t/galera_log_bin_opt-master.opt
@@ -0,0 +1 @@
+--log-slave-updates --log-bin
diff --git a/mysql-test/suite/galera/t/galera_log_bin_opt.cnf b/mysql-test/suite/galera/t/galera_log_bin_opt.cnf
new file mode 100644
index 00000000000..a09efd2e011
--- /dev/null
+++ b/mysql-test/suite/galera/t/galera_log_bin_opt.cnf
@@ -0,0 +1,15 @@
+!include ../galera_2nodes.cnf
+
+[mysqld]
+wsrep_sst_method=mariabackup
+wsrep_sst_auth="root:"
+
+[mysqld.1]
+wsrep_provider_options='base_port=@mysqld.1.#galera_port;gcache.size=1;pc.ignore_sb=true'
+
+[mysqld.2]
+wsrep_provider_options='base_port=@mysqld.2.#galera_port;gcache.size=1;pc.ignore_sb=true'
+
+[sst]
+transferfmt=@ENV.MTR_GALERA_TFMT
+streamfmt=xbstream
diff --git a/mysql-test/suite/galera/t/galera_log_bin_opt.test b/mysql-test/suite/galera/t/galera_log_bin_opt.test
new file mode 100644
index 00000000000..f3d0afbc8bc
--- /dev/null
+++ b/mysql-test/suite/galera/t/galera_log_bin_opt.test
@@ -0,0 +1,2 @@
+--source include/have_mariabackup.inc
+--source galera_log_bin.inc
diff --git a/mysql-test/suite/galera/t/galera_rsu_error.test b/mysql-test/suite/galera/t/galera_rsu_error.test
index cad8154ac76..6de7607b6ec 100644
--- a/mysql-test/suite/galera/t/galera_rsu_error.test
+++ b/mysql-test/suite/galera/t/galera_rsu_error.test
@@ -9,6 +9,9 @@ CREATE TABLE t1 (f1 INTEGER) Engine=InnoDB;
INSERT INTO t1 VALUES (1), (1);
--connection node_2
+--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.INNODB_SYS_TABLES WHERE NAME LIKE 'test/t1';
+--source include/wait_condition.inc
+
SET SESSION wsrep_OSU_method = "RSU";
--error ER_DUP_ENTRY
ALTER TABLE t1 ADD PRIMARY KEY (f1);
diff --git a/mysql-test/suite/galera/t/galera_split_brain.test b/mysql-test/suite/galera/t/galera_split_brain.test
index 91a2cc326a2..6a822b8f127 100644
--- a/mysql-test/suite/galera/t/galera_split_brain.test
+++ b/mysql-test/suite/galera/t/galera_split_brain.test
@@ -26,18 +26,13 @@ call mtr.add_suppression("WSREP: TO isolation failed for: ");
CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
# Reset the master and restart the slave so that post-test checks can run
-
-
--connection node_2
--source include/start_mysqld.inc
---sleep 5
--source include/wait_until_connected_again.inc
--let $wait_condition = SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'
--source include/wait_condition.inc
---sleep 10
-
--connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2
--source include/wait_until_connected_again.inc
@@ -46,4 +41,3 @@ CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
--source include/auto_increment_offset_restore.inc
--source include/galera_end.inc
-
diff --git a/mysql-test/suite/galera/t/galera_ssl_upgrade.cnf b/mysql-test/suite/galera/t/galera_ssl_upgrade.cnf
index 3fd1c650c05..2954ae0f4cb 100644
--- a/mysql-test/suite/galera/t/galera_ssl_upgrade.cnf
+++ b/mysql-test/suite/galera/t/galera_ssl_upgrade.cnf
@@ -1,7 +1,7 @@
!include ../galera_2nodes.cnf
[mysqld.1]
-wsrep_provider_options='base_port=@mysqld.1.#galera_port;socket.ssl=yes;socket.ssl_cert=@ENV.MYSQL_TEST_DIR/std_data/cacert.pem;socket.ssl_key=@ENV.MYSQL_TEST_DIR/std_data/cakey.pem'
+wsrep_provider_options='base_port=@mysqld.1.#galera_port;socket.ssl=yes;socket.ssl_cert=@ENV.MYSQL_TEST_DIR/std_data/galera-cert.pem;socket.ssl_key=@ENV.MYSQL_TEST_DIR/std_data/galera-key.pem'
[mysqld.2]
-wsrep_provider_options='base_port=@mysqld.2.#galera_port;socket.ssl=yes;socket.ssl_cert=@ENV.MYSQL_TEST_DIR/std_data/cacert.pem;socket.ssl_key=@ENV.MYSQL_TEST_DIR/std_data/cakey.pem'
+wsrep_provider_options='base_port=@mysqld.2.#galera_port;socket.ssl=yes;socket.ssl_cert=@ENV.MYSQL_TEST_DIR/std_data/galera-cert.pem;socket.ssl_key=@ENV.MYSQL_TEST_DIR/std_data/galera-key.pem'
diff --git a/mysql-test/suite/galera/t/galera_ssl_upgrade.test b/mysql-test/suite/galera/t/galera_ssl_upgrade.test
index 2ce932b5398..146a0126f1f 100644
--- a/mysql-test/suite/galera/t/galera_ssl_upgrade.test
+++ b/mysql-test/suite/galera/t/galera_ssl_upgrade.test
@@ -6,15 +6,33 @@
--source include/galera_cluster.inc
--source include/have_innodb.inc
+--source include/have_ssl_communication.inc
+
+call mtr.add_suppression("WSREP: write_handler(): protocol is shutdown");
SELECT VARIABLE_VALUE = 'Synced' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_state_comment';
SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
+--connection node_1
+call mtr.add_suppression("WSREP: write_handler(): protocol is shutdown.*");
+--connection node_2
+call mtr.add_suppression("WSREP: write_handler(): protocol is shutdown.*");
+
+# Setup galera ports
+--connection node_1
+--source suite/galera/include/galera_base_port.inc
+--let $NODE_GALERAPORT_1 = $_NODE_GALERAPORT
+
+--connection node_2
+--source suite/galera/include/galera_base_port.inc
+--let $NODE_GALERAPORT_2 = $_NODE_GALERAPORT
+
# 2. Restart node #1 with a socket.ssl_ca that includes both the new and the old certificate
--connection node_1
--source include/shutdown_mysqld.inc
---let $start_mysqld_params = --wsrep-cluster-address=gcomm://127.0.0.1:$NODE_GALERAPORT_2 --wsrep_provider_options=base_port=$NODE_GALERAPORT_1;socket.ssl=yes;socket.ssl_ca=$MYSQL_TEST_DIR/std_data/galera-upgrade-ca-cert.pem;socket.ssl_cert=$MYSQL_TEST_DIR/std_data/cacert.pem;socket.ssl_key=$MYSQL_TEST_DIR/std_data/cakey.pem
+--let $restart_noprint = 1
+--let $start_mysqld_params = --wsrep-cluster-address=gcomm://127.0.0.1:$NODE_GALERAPORT_2 --wsrep_provider_options=base_port=$NODE_GALERAPORT_1;socket.ssl=yes;socket.ssl_ca=$MYSQL_TEST_DIR/std_data/galera-upgrade-ca-cert.pem;socket.ssl_cert=$MYSQL_TEST_DIR/std_data/galera-cert.pem;socket.ssl_key=$MYSQL_TEST_DIR/std_data/galera-key.pem
--source include/start_mysqld.inc
--source include/wait_until_connected_again.inc
diff --git a/mysql-test/suite/galera/t/galera_sst_mariabackup_lost_found.test b/mysql-test/suite/galera/t/galera_sst_mariabackup_lost_found.test
index d1c30656139..e8dcbd849d8 100644
--- a/mysql-test/suite/galera/t/galera_sst_mariabackup_lost_found.test
+++ b/mysql-test/suite/galera/t/galera_sst_mariabackup_lost_found.test
@@ -10,7 +10,7 @@
--connection node_2
#--connection node_2
-#--source suite/galera/include/galera_unload_provider.inc
+#--source suite/galera/include/galera_stop_replication.inc
--echo Shutting down server ...
--source include/shutdown_mysqld.inc
diff --git a/mysql-test/suite/galera/t/galera_toi_ddl_sequential.test b/mysql-test/suite/galera/t/galera_toi_ddl_sequential.test
index 51eae7005df..89a1af845c9 100644
--- a/mysql-test/suite/galera/t/galera_toi_ddl_sequential.test
+++ b/mysql-test/suite/galera/t/galera_toi_ddl_sequential.test
@@ -9,6 +9,9 @@ CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1);
--connection node_2
+--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.INNODB_SYS_TABLES WHERE NAME LIKE 'test/t1';
+--source include/wait_condition.inc
+
ALTER TABLE t1 ADD COLUMN f2 INTEGER;
INSERT INTO t1 VALUES (2, 3);
diff --git a/mysql-test/suite/galera/t/galera_toi_lock_shared.test b/mysql-test/suite/galera/t/galera_toi_lock_shared.test
index 6857a0e08ca..566bc721926 100644
--- a/mysql-test/suite/galera/t/galera_toi_lock_shared.test
+++ b/mysql-test/suite/galera/t/galera_toi_lock_shared.test
@@ -10,14 +10,25 @@ CREATE TABLE t1 (id INT PRIMARY KEY) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1);
--connection node_2
+--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 't1'
+--source include/wait_condition.inc
+--let $wait_condition = SELECT COUNT(*) = 1 FROM t1
+--source include/wait_condition.inc
+
ALTER TABLE t1 ADD COLUMN f2 INTEGER, LOCK=SHARED;
--connection node_1
+--let $wait_condition = SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1';
+--source include/wait_condition.inc
INSERT INTO t1 VALUES (2, 2);
-SELECT COUNT(*) = 2 FROM t1;
+SELECT COUNT(*) AS EXPECT_2 FROM t1;
+SELECT * FROM t1;
--connection node_2
+--let $wait_condition = SELECT COUNT(*) = 2 FROM t1
+--source include/wait_condition.inc
INSERT INTO t1 VALUES (3, 3);
-SELECT COUNT(*) = 3 FROM t1;
+SELECT COUNT(*) AS EXPECT_3 FROM t1;
+SELECT * FROM t1;
DROP TABLE t1;
diff --git a/mysql-test/suite/galera/t/galera_truncate.test b/mysql-test/suite/galera/t/galera_truncate.test
index 79f9bad1f1b..0000b430e45 100644
--- a/mysql-test/suite/galera/t/galera_truncate.test
+++ b/mysql-test/suite/galera/t/galera_truncate.test
@@ -3,8 +3,7 @@
#
--source include/galera_cluster.inc
---source include/have_innodb.inc
-
+--source include/have_perfschema.inc
#
# Simple case
#
@@ -14,6 +13,9 @@ CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) Engine=InnoDB;
INSERT INTO t1 VALUES (1);
--connection node_2
+--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.INNODB_SYS_TABLES WHERE NAME LIKE 'test/t1';
+--source include/wait_condition.inc
+
TRUNCATE TABLE t1;
SELECT COUNT(*) = 0 FROM t1;
@@ -29,6 +31,9 @@ CREATE TABLE t2 (f1 VARCHAR(255)) Engine=InnoDB;
INSERT INTO t2 VALUES ('abc');
--connection node_1
+--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.INNODB_SYS_TABLES WHERE NAME LIKE 'test/t2';
+--source include/wait_condition.inc
+
TRUNCATE TABLE t2;
--connection node_2
@@ -51,6 +56,23 @@ TRUNCATE TABLE t4;
--connection node_2
SELECT AUTO_INCREMENT = 1 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME IN ('t3', 't4');
+
+#
+# MDEV-24865 : Server crashes when truncate mysql user table
+#
+--connection node_1
+
+--error ER_NO_SUCH_TABLE
+TRUNCATE TABLE mysql.user;
+--error ER_WRONG_PERFSCHEMA_USAGE
+TRUNCATE TABLE performance_schema.threads;
+--error ER_DBACCESS_DENIED_ERROR
+TRUNCATE TABLE information_schema.tables;
+TRUNCATE TABLE mysql.innodb_index_stats;
+--error ER_NO_SUCH_TABLE
+TRUNCATE TABLE foo.bar;
+TRUNCATE TABLE t1;
+
DROP TABLE t1;
DROP TABLE t2;
DROP TABLE t3;
diff --git a/mysql-test/suite/galera/t/galera_truncate_temporary.test b/mysql-test/suite/galera/t/galera_truncate_temporary.test
index 3ad94eb9930..ea20911bd5d 100644
--- a/mysql-test/suite/galera/t/galera_truncate_temporary.test
+++ b/mysql-test/suite/galera/t/galera_truncate_temporary.test
@@ -67,6 +67,9 @@ CREATE TEMPORARY TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
INSERT INTO t1 VALUES (2);
--connection node_2
+--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.INNODB_SYS_TABLES WHERE NAME LIKE 'test/t1';
+--source include/wait_condition.inc
+
TRUNCATE TABLE t1;
SELECT COUNT(*) = 0 FROM t1;
diff --git a/mysql-test/suite/galera/t/galera_var_sst_auth.cnf b/mysql-test/suite/galera/t/galera_var_sst_auth.cnf
index ff29db2306b..114815d446a 100644
--- a/mysql-test/suite/galera/t/galera_var_sst_auth.cnf
+++ b/mysql-test/suite/galera/t/galera_var_sst_auth.cnf
@@ -5,8 +5,3 @@ wsrep_sst_auth=root:
[mysqld.2]
wsrep_sst_auth=root:
-
-
-
-
-
diff --git a/mysql-test/suite/galera/t/galera_var_sst_auth.test b/mysql-test/suite/galera/t/galera_var_sst_auth.test
index 5c9b3f5a61e..ebeaddc0e63 100644
--- a/mysql-test/suite/galera/t/galera_var_sst_auth.test
+++ b/mysql-test/suite/galera/t/galera_var_sst_auth.test
@@ -1,12 +1,39 @@
--source include/galera_cluster.inc
--source include/have_innodb.inc
---echo #
---echo # MDEV-10492: Assertion failure on shutdown when wsrep_sst_auth set in config
---echo #
+# Save original auto_increment_offset values.
+--let $node_1=node_1
+--let $node_2=node_2
+--source include/auto_increment_offset_save.inc
+
+#
+# MDEV-10492: Assertion failure on shutdown when wsrep_sst_auth set in config
+#
SELECT @@global.wsrep_sst_auth;
SET @@global.wsrep_sst_auth='foo:bar';
SELECT @@global.wsrep_sst_auth;
---source include/galera_end.inc
+#
+# MDEV-24509 Warning: Memory not freed: 56 on SET @@global.wsrep_sst_auth
+#
+--connection node_2
+SET @@global.wsrep_sst_auth= 'abcdefghijklmnopqrstuvwxyz';
+SELECT @@global.wsrep_sst_auth;
+--echo Shutdown node_2
+--source include/shutdown_mysqld.inc
+
+# On node_1, verify that node_2 has left the cluster.
+--connection node_1
+--let $wait_condition = SELECT VARIABLE_VALUE = 1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
+--source include/wait_condition.inc
+
+# Restart node_2
+--connection node_2
+--source include/start_mysqld.inc
+--let $wait_condition = SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
+--source include/wait_condition.inc
+SELECT @@global.wsrep_sst_auth;
+
+# Restore original auto_increment_offset values.
+--source include/auto_increment_offset_restore.inc
diff --git a/mysql-test/suite/galera/t/galera_var_wsrep_on_off.test b/mysql-test/suite/galera/t/galera_var_wsrep_on_off.test
index 783b78792e6..10517f877ae 100644
--- a/mysql-test/suite/galera/t/galera_var_wsrep_on_off.test
+++ b/mysql-test/suite/galera/t/galera_var_wsrep_on_off.test
@@ -30,3 +30,138 @@ SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 3;
DROP TABLE t1;
+
+#
+# Test that variable wsrep_on cannot be changed while in
+# active transaction.
+#
+
+--connection node_1
+START TRANSACTION;
+--error ER_CANT_DO_THIS_DURING_AN_TRANSACTION
+SET SESSION wsrep_on=OFF;
+--error ER_CANT_DO_THIS_DURING_AN_TRANSACTION
+SET GLOBAL wsrep_on=OFF;
+COMMIT;
+
+
+#
+# Test that @@global.wsrep_on does not affect the value of
+# @@session.wsrep_on of current sessions
+#
+
+--connection node_1
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY);
+START TRANSACTION;
+INSERT INTO t1 VALUES (1);
+
+--connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1;
+--connection node_1a
+SET GLOBAL wsrep_on = OFF;
+
+--connection node_1
+SHOW SESSION VARIABLES LIKE 'wsrep_on';
+SHOW GLOBAL VARIABLES LIKE 'wsrep_on';
+INSERT INTO t1 VALUES (2);
+COMMIT;
+
+--connection node_2
+SET SESSION wsrep_sync_wait = 15;
+SELECT * FROM t1;
+
+--connection node_1a
+SET GLOBAL wsrep_on = ON;
+DROP TABLE t1;
+
+
+#
+# New connections inherit @@session.wsrep_on from @@global.wsrep_on
+#
+
+--connection node_1
+SET GLOBAL wsrep_on = OFF;
+
+--connect node_1b, 127.0.0.1, root, , test, $NODE_MYPORT_1;
+--connection node_1b
+SHOW SESSION VARIABLES LIKE 'wsrep_on';
+SHOW GLOBAL VARIABLES LIKE 'wsrep_on';
+CREATE TABLE t2 (f1 INTEGER);
+DROP TABLE t2;
+
+SET GLOBAL wsrep_on = ON;
+SHOW SESSION VARIABLES LIKE 'wsrep_on';
+
+--disconnect node_1b
+
+
+#
+# Can't set @@session.wsrep_on = ON, while @@global.wsrep_on = OFF
+#
+
+--connection node_1
+SET GLOBAL wsrep_on = OFF;
+--error ER_WRONG_ARGUMENTS
+SET SESSION wsrep_on = ON;
+
+SET GLOBAL wsrep_on = ON;
+SET SESSION wsrep_on = ON;
+
+
+#
+# @@global.wsrep_on = OFF followed by @@global.wsrep_on = ON
+# in a new connection
+#
+
+--connection node_1
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY);
+SET GLOBAL wsrep_on = OFF;
+--connect node_1b, 127.0.0.1, root, , test, $NODE_MYPORT_1;
+--connection node_1b
+SHOW SESSION VARIABLES LIKE 'wsrep_on';
+SHOW GLOBAL VARIABLES LIKE 'wsrep_on';
+SET GLOBAL wsrep_on = ON;
+START TRANSACTION;
+INSERT INTO t1 VALUES(1);
+COMMIT;
+
+SELECT * FROM t1;
+
+--connection node_2
+SELECT * FROM t1;
+
+DROP TABLE t1;
+
+
+#
+# Test single statement, multi statement, and
+# TOI transactions while @@session.wsrep_on = OFF
+# and then the same with @@global.wsrep_on = OFF.
+# Notice, the combination @@global.wsrep_on = OFF
+# and @@session.wsrep_on = ON is not possible,
+# (as tested above in this test case)
+#
+
+--connection node_1
+SET SESSION wsrep_on = OFF;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY);
+INSERT INTO t1 VALUES (1);
+START TRANSACTION;
+INSERT INTO t1 VALUES (2);
+COMMIT;
+DROP TABLE t1;
+--connection node_2
+SHOW TABLES;
+--connection node_1
+SET SESSION wsrep_on = ON;
+
+SET GLOBAL wsrep_on = OFF;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY);
+INSERT INTO t1 VALUES (1);
+START TRANSACTION;
+INSERT INTO t1 VALUES (2);
+COMMIT;
+--connection node_2
+SHOW TABLES;
+--connection node_1
+DROP TABLE t1;
+SET GLOBAL wsrep_on = ON;
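As an aside on the SET SESSION wsrep_sync_wait = 15 used in this test: wsrep_sync_wait is a bitmask that enables causality waits per statement class, and 15 turns all of them on. A minimal SQL sketch (the table name t1 is only illustrative):

-- wsrep_sync_wait bits: 1 = READ, 2 = UPDATE/DELETE, 4 = INSERT/REPLACE, 8 = SHOW
SET SESSION wsrep_sync_wait = 15;
SELECT * FROM t1;  -- waits until this node has applied all writes replicated before the read
SET SESSION wsrep_sync_wait = DEFAULT;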
diff --git a/mysql-test/suite/galera/t/galera_var_wsrep_provider_options.test b/mysql-test/suite/galera/t/galera_var_wsrep_provider_options.test
new file mode 100644
index 00000000000..3dc2cbaa7d6
--- /dev/null
+++ b/mysql-test/suite/galera/t/galera_var_wsrep_provider_options.test
@@ -0,0 +1,11 @@
+--source include/galera_cluster.inc
+
+call mtr.add_suppression("WSREP: Unknown parameter 'a'");
+call mtr.add_suppression("WSREP: Set options returned 7");
+
+--error ER_WRONG_ARGUMENTS
+SET GLOBAL wsrep_provider_options=NULL;
+SET GLOBAL wsrep_provider_options='';
+SET GLOBAL wsrep_provider_options=' ';
+--error ER_WRONG_ARGUMENTS
+SET GLOBAL wsrep_provider_options='a=1';
diff --git a/mysql-test/suite/sys_vars/t/wsrep_start_position_basic.test b/mysql-test/suite/galera/t/galera_var_wsrep_start_position.test
index 3e57cfa6da2..43fd09b902c 100644
--- a/mysql-test/suite/sys_vars/t/wsrep_start_position_basic.test
+++ b/mysql-test/suite/galera/t/galera_var_wsrep_start_position.test
@@ -1,12 +1,12 @@
---source include/have_wsrep.inc
+--source include/galera_cluster.inc
--echo #
--echo # wsrep_start_position
--echo #
---echo # save the initial value
-SET @wsrep_start_position_global_saved = @@global.wsrep_start_position;
+CALL mtr.add_suppression("WSREP: SST failed for position .*");
+SET @wsrep_start_position_global_saved = @@global.wsrep_start_position;
--echo # default
SELECT @@global.wsrep_start_position;
@@ -14,40 +14,60 @@ SELECT @@global.wsrep_start_position;
--echo # scope
--error ER_INCORRECT_GLOBAL_LOCAL_VAR
SELECT @@session.wsrep_start_position;
-SET @@global.wsrep_start_position='00000000-0000-0000-0000-000000000000:-1';
SELECT @@global.wsrep_start_position;
--echo
--echo # valid values
-SET @@global.wsrep_start_position='00000000-0000-0000-0000-000000000000:-2';
-SELECT @@global.wsrep_start_position;
-SET @@global.wsrep_start_position='12345678-1234-1234-1234-123456789012:100';
-SELECT @@global.wsrep_start_position;
-SET @@global.wsrep_start_position=default;
+SET @@global.wsrep_start_position='00000000-0000-0000-0000-000000000000:-1';
SELECT @@global.wsrep_start_position;
--echo
+--error ER_WRONG_VALUE_FOR_VAR
+SET @@global.wsrep_start_position='00000000-0000-0000-0000-000000000000:0';
+SELECT @@global.wsrep_start_position;
--echo # invalid values
--error ER_WRONG_VALUE_FOR_VAR
+SET @@global.wsrep_start_position='12345678-1234-1234-1234-123456789012:100';
+SELECT @@global.wsrep_start_position;
+--error ER_WRONG_VALUE_FOR_VAR
+SET @@global.wsrep_start_position='00000000-0000-0000-0000-000000000000:-2';
+SELECT @@global.wsrep_start_position;
+--error ER_WRONG_VALUE_FOR_VAR
+SET @@global.wsrep_start_position='00000000-0000-0000-0000-000000000000:-2A';
+SELECT @@global.wsrep_start_position;
+--error ER_WRONG_VALUE_FOR_VAR
+SET @@global.wsrep_start_position='00000000-0000-0000-0000-000000000000:0A';
+SELECT @@global.wsrep_start_position;
+--error ER_WRONG_VALUE_FOR_VAR
SET @@global.wsrep_start_position='000000000000000-0000-0000-0000-000000000000:-1';
+SELECT @@global.wsrep_start_position;
--error ER_WRONG_VALUE_FOR_VAR
SET @@global.wsrep_start_position='12345678-1234-1234-12345-123456789012:100';
+SELECT @@global.wsrep_start_position;
--error ER_WRONG_VALUE_FOR_VAR
SET @@global.wsrep_start_position='12345678-1234-123-12345-123456789012:0';
+SELECT @@global.wsrep_start_position;
--error ER_WRONG_VALUE_FOR_VAR
SET @@global.wsrep_start_position='12345678-1234-1234-1234-123456789012:_99999';
+SELECT @@global.wsrep_start_position;
--error ER_WRONG_VALUE_FOR_VAR
SET @@global.wsrep_start_position='12345678-1234-1234-1234-123456789012:a';
+SELECT @@global.wsrep_start_position;
--error ER_WRONG_VALUE_FOR_VAR
SET @@global.wsrep_start_position='OFF';
+SELECT @@global.wsrep_start_position;
--error ER_WRONG_VALUE_FOR_VAR
SET @@global.wsrep_start_position=ON;
+SELECT @@global.wsrep_start_position;
--error ER_WRONG_VALUE_FOR_VAR
SET @@global.wsrep_start_position='';
+SELECT @@global.wsrep_start_position;
--error ER_WRONG_VALUE_FOR_VAR
SET @@global.wsrep_start_position=NULL;
+SELECT @@global.wsrep_start_position;
--error ER_WRONG_VALUE_FOR_VAR
SET @@global.wsrep_start_position='junk';
+SELECT @@global.wsrep_start_position;
--echo
--echo # restore the initial value
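For context on the values validated above, a wsrep_start_position is a '<cluster state UUID>:<seqno>' pair, with seqno -1 meaning the position is unknown. A hedged sketch of where the matching pieces can be read from status variables; the pairing shown is illustrative and not part of the test:

SELECT VARIABLE_VALUE AS cluster_uuid
  FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_state_uuid';
SELECT VARIABLE_VALUE AS last_committed_seqno
  FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_last_committed';
SELECT @@global.wsrep_start_position;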
diff --git a/mysql-test/suite/galera/t/galera_virtual_blob.test b/mysql-test/suite/galera/t/galera_virtual_blob.test
new file mode 100644
index 00000000000..a10e3025668
--- /dev/null
+++ b/mysql-test/suite/galera/t/galera_virtual_blob.test
@@ -0,0 +1,10 @@
+--source include/galera_cluster.inc
+
+CREATE TABLE t (f INT GENERATED ALWAYS AS (a+b)VIRTUAL,a INT,b INT,h BLOB);
+INSERT INTO t (a,b)VALUES(0,0), (0,0), (0,0), (0,0), (0,0);
+SELECT * from t;
+
+--connection node_2
+SELECT * from t;
+--connection node_1
+DROP TABLE t;
diff --git a/mysql-test/suite/galera/t/galera_virtual_column.test b/mysql-test/suite/galera/t/galera_virtual_column.test
new file mode 100644
index 00000000000..84e1da024f1
--- /dev/null
+++ b/mysql-test/suite/galera/t/galera_virtual_column.test
@@ -0,0 +1,42 @@
+#
+# This test is for testing virtual column support in a Galera cluster
+#
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+
+#
+# test case for verifying that a cascaded delete in a table with a virtual column does not crash the slave node
+#
+
+--connection node_1
+
+CREATE TABLE p (id INT UNSIGNED PRIMARY KEY AUTO_INCREMENT) ENGINE = InnoDB;
+CREATE TABLE c (id INT UNSIGNED PRIMARY KEY AUTO_INCREMENT, pid INT UNSIGNED, bitmap TINYINT UNSIGNED NOT NULL DEFAULT 0, bitmap5 TINYINT UNSIGNED GENERATED ALWAYS AS (bitmap&(1<<5)) VIRTUAL, FOREIGN KEY (pid) REFERENCES p (id) ON DELETE CASCADE ON UPDATE CASCADE);
+
+# not sure if this index is needed for the test
+CREATE INDEX bitmap5 ON c(bitmap5) USING BTREE;
+
+INSERT INTO p VALUES(1);
+INSERT INTO c(pid) VALUES(1);
+
+
+--connection node_2
+# wait until both INSERTS have arrived in node_2
+--let $wait_condition = SELECT COUNT(*) = 1 FROM c
+--source include/wait_condition.inc
+
+--connection node_1
+# delete from the parent table; it will cascade into the child table
+# node_2 might have a problem applying this cascaded delete
+DELETE FROM p WHERE id=1;
+
+SELECT * FROM p;
+SELECT * FROM c;
+
+--connection node_2
+--let $wait_condition = SELECT COUNT(*) = 0 FROM c;
+--source include/wait_condition.inc
+
+--connection node_1
+DROP TABLE c;
+DROP TABLE p;
diff --git a/mysql-test/suite/galera/t/lp1376747-4.test b/mysql-test/suite/galera/t/lp1376747-4.test
index 7cf922b97e5..d19ff422ab0 100644
--- a/mysql-test/suite/galera/t/lp1376747-4.test
+++ b/mysql-test/suite/galera/t/lp1376747-4.test
@@ -5,7 +5,8 @@
# after provider is unpaused
#
--source include/galera_cluster.inc
---source include/have_innodb.inc
+--source include/have_debug.inc
+--source include/have_debug_sync.inc
--let $galera_connection_name = node_2a
--let $galera_server_number = 2
@@ -17,7 +18,7 @@ INSERT INTO t1 VALUES (1);
--connection node_2
SET session wsrep_sync_wait=0;
-FLUSH TABLE WITH READ LOCK;
+FLUSH TABLES WITH READ LOCK;
--connection node_1
ALTER TABLE t1 ADD COLUMN f2 INTEGER;
@@ -25,32 +26,33 @@ INSERT INTO t1 VALUES (2,3);
--connection node_2a
SET session wsrep_sync_wait=0;
-
---echo # node_1 DDL should not yet be applied
SHOW CREATE TABLE t1;
-
+SET debug_sync='flush_tables_with_read_lock_after_acquire_locks SIGNAL parked2 WAIT_FOR go2';
--send FLUSH TABLES t1 WITH READ LOCK;
--connection node_2
---let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user' AND STATE LIKE '%committed%';
---source include/wait_condition.inc
-
+SET debug_sync='now WAIT_FOR parked2';
+
+# Let the FLUSH TABLES reach the pause state before we unlock the
+# table; otherwise there is a window in which FLUSH TABLES has not
+# yet paused the provider and UNLOCK TABLES allows the ALTER TABLE
+# to proceed. This is because --send is asynchronous.
+--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE Info LIKE 'FLUSH TABLES t1 WITH READ LOCK';
+--let $wait_condition_on_error_output = SHOW PROCESSLIST
+--source include/wait_condition_with_debug.inc
+
+SET debug_sync='now SIGNAL go2';
+# This will release the existing lock but will not resume
+# the cluster, as there is a new FTRL that is still pausing it.
UNLOCK TABLES;
-
---echo # node_1 DDL should not yet be applied 2
SHOW CREATE TABLE t1;
+set debug_sync= 'RESET';
--connection node_2a
--reap
UNLOCK TABLES;
---let $wait_condition = SELECT COUNT(*) = 2 FROM t1;
---source include/wait_condition.inc
-
---let $wait_condition = SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = 'test' AND TABLE_NAME = 't1';
---source include/wait_condition.inc
-
---echo # node_1 DDL should be applied 2
+SET SESSION wsrep_sync_wait = DEFAULT;
SHOW CREATE TABLE t1;
SELECT * from t1;
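The debug_sync choreography above follows the standard MariaDB DEBUG_SYNC pattern (available in debug builds only). A minimal two-session sketch, with the sync point name my_sync_point used as a placeholder:

-- session 1: arm a sync point, then run the statement that passes through it
SET DEBUG_SYNC = 'my_sync_point SIGNAL parked WAIT_FOR go';
-- <the statement that reaches my_sync_point signals "parked" and then blocks>
-- session 2: wait for the signal, perform checks, then release session 1
SET DEBUG_SYNC = 'now WAIT_FOR parked';
SET DEBUG_SYNC = 'now SIGNAL go';
-- afterwards, clear all sync point actions
SET DEBUG_SYNC = 'RESET';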
diff --git a/mysql-test/suite/galera_3nodes/disabled.def b/mysql-test/suite/galera_3nodes/disabled.def
index 0e2706f2dc3..87eca207d5a 100644
--- a/mysql-test/suite/galera_3nodes/disabled.def
+++ b/mysql-test/suite/galera_3nodes/disabled.def
@@ -10,7 +10,16 @@
#
##############################################################################
+GAL-501 : MDEV-24645 galera_3nodes.GAL-501 MTR failed: failed to open gcomm backend connection: 110
galera_gtid_2_cluster : MDEV-23775 Galera test failure on galera_3nodes.galera_gtid_2_cluster
+galera_ipv6_mariabackup : MDEV-24440: galera_3nodes.galera_ipv6_mariabackup MTR fails sporadically: Failed to read from: wsrep_sst_mariabackup --role 'donor' --address '[::1]:16028/xtrabackup_sst//1'
+galera_ipv6_mariabackup_section : MDEV-22195: galera_3nodes.galera_ipv6_mariabackup_section MTR failed: assert_grep.inc failed
+galera_ipv6_mysqldump : MDEV-24036: galera_3nodes.galera_ipv6_mysqldump: rare random crashes during shutdown
+galera_ipv6_rsync_section : MDEV-23580: galera_3nodes.galera_ipv6_rsync_section MTR failed: WSREP_SST: [ERROR] rsync daemon port '16008' has been taken
galera_ist_gcache_rollover : MDEV-23578 WSREP: exception caused by message: {v=0,t=1,ut=255,o=4,s=0,sr=0,as=1,f=6,src=50524cfe,srcvid=view_id(REG,50524cfe,4),insvid=view_id(UNKNOWN,00000000,0),ru=00000000,r=[-1,-1],fs=75,nl=(}
-galera_slave_options_do :MDEV-8798
+galera_load_data_ist : MDEV-24639 galera_3nodes.galera_load_data_ist MTR failed with SIGABRT: query 'reap' failed: 2013: Lost connection to MySQL server during query
+galera_pc_bootstrap : MDEV-24650 galera_pc_bootstrap MTR failed: Could not execute 'check-testcase' before testcase
+galera_safe_to_bootstrap : MDEV-24097 galera_3nodes.galera_safe_to_bootstrap MTR sporadically fails: Failed to start mysqld or mysql_shutdown failed
+galera_slave_options_do : MDEV-8798
galera_slave_options_ignore : MDEV-8798
diff --git a/mysql-test/suite/galera_3nodes/r/galera_2_cluster.result b/mysql-test/suite/galera_3nodes/r/galera_2_cluster.result
new file mode 100644
index 00000000000..87898891f9b
--- /dev/null
+++ b/mysql-test/suite/galera_3nodes/r/galera_2_cluster.result
@@ -0,0 +1,89 @@
+connection node_2;
+connection node_1;
+connect node_5, 127.0.0.1, root, , test, $NODE_MYPORT_5;
+connect node_4, 127.0.0.1, root, , test, $NODE_MYPORT_4;
+connection node_4;
+CHANGE MASTER TO master_host='127.0.0.1', master_user='root', master_port=NODE_MYPORT_1, master_use_gtid=current_pos;;
+START SLAVE;
+include/wait_for_slave_to_start.inc
+connection node_1;
+CREATE TABLE t1(c1 INTEGER NOT NULL AUTO_INCREMENT PRIMARY KEY, c2 INTEGER);
+INSERT INTO t1(c2) VALUES(1);
+connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3;
+connection node_3;
+SELECT COUNT(*) = 1 FROM t1;
+COUNT(*) = 1
+1
+connection node_1;
+include/save_master_gtid.inc
+connection node_4;
+include/sync_with_master_gtid.inc
+SELECT COUNT(*) = 1 FROM t1;
+COUNT(*) = 1
+1
+connect node_6, 127.0.0.1, root, , test, $NODE_MYPORT_6;
+connection node_6;
+SELECT COUNT(*) = 1 FROM t1;
+COUNT(*) = 1
+1
+connection node_2;
+ALTER TABLE t1 ADD COLUMN t3 INTEGER;
+Node 2 column number AFTER ALTER
+SELECT COUNT(*) = 3 FROM information_schema.columns WHERE table_name ='t1';
+COUNT(*) = 3
+1
+connection node_3;
+Node 3 column number AFTER ALTER
+SELECT COUNT(*) = 3 FROM information_schema.columns WHERE table_name ='t1';
+COUNT(*) = 3
+1
+connection node_1;
+include/save_master_gtid.inc
+connection node_4;
+include/sync_with_master_gtid.inc
+Node 4 column number AFTER ALTER
+SELECT COUNT(*) = 3 FROM information_schema.columns WHERE table_name ='t1';
+COUNT(*) = 3
+1
+connection node_6;
+Node 6 column number AFTER ALTER
+SELECT COUNT(*) = 3 FROM information_schema.columns WHERE table_name ='t1';
+COUNT(*) = 3
+1
+connection node_2;
+OPTIMIZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 optimize note Table does not support optimize, doing recreate + analyze instead
+test.t1 optimize status OK
+connection node_1;
+connection node_4;
+connection node_6;
+connection node_1;
+DROP TABLE t1;
+connection node_4;
+STOP SLAVE;
+RESET SLAVE;
+SET GLOBAL wsrep_on = OFF;
+RESET MASTER;
+SET GLOBAL wsrep_on = ON;
+SET GLOBAL GTID_SLAVE_POS="";
+connection node_1;
+SET GLOBAL wsrep_on = OFF;
+RESET MASTER;
+SET GLOBAL wsrep_on = ON;
+connection node_2;
+SET GLOBAL wsrep_on = OFF;
+RESET MASTER;
+SET GLOBAL wsrep_on = ON;
+connection node_3;
+SET GLOBAL wsrep_on = OFF;
+RESET MASTER;
+SET GLOBAL wsrep_on = ON;
+connection node_5;
+SET GLOBAL wsrep_on = OFF;
+RESET MASTER;
+SET GLOBAL wsrep_on = ON;
+connection node_6;
+SET GLOBAL wsrep_on = OFF;
+RESET MASTER;
+SET GLOBAL wsrep_on = ON;
diff --git a/mysql-test/suite/galera_3nodes/r/galera_ssl_reload.result b/mysql-test/suite/galera_3nodes/r/galera_ssl_reload.result
new file mode 100644
index 00000000000..541822797b7
--- /dev/null
+++ b/mysql-test/suite/galera_3nodes/r/galera_ssl_reload.result
@@ -0,0 +1,15 @@
+connection node_2;
+connection node_1;
+connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3;
+connection node_1;
+connection node_2;
+connection node_3;
+connection node_1;
+connection node_2;
+connection node_2;
+# restart: with restart_parameters
+SET GLOBAL wsrep_provider_options = 'socket.ssl_reload=1';
+connection node_3;
+# restart: with restart_parameters
+connection node_2;
+FLUSH SSL;
diff --git a/mysql-test/suite/galera_3nodes/r/galera_wsrep_schema_init.result b/mysql-test/suite/galera_3nodes/r/galera_wsrep_schema_init.result
new file mode 100644
index 00000000000..2a29afd62be
--- /dev/null
+++ b/mysql-test/suite/galera_3nodes/r/galera_wsrep_schema_init.result
@@ -0,0 +1,94 @@
+connection node_2;
+connection node_1;
+connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3;
+connection node_1;
+connection node_1;
+connection node_2;
+connection node_3;
+SHOW CREATE TABLE mysql.wsrep_cluster;
+Table Create Table
+wsrep_cluster CREATE TABLE `wsrep_cluster` (
+ `cluster_uuid` char(36) NOT NULL,
+ `view_id` bigint(20) NOT NULL,
+ `view_seqno` bigint(20) NOT NULL,
+ `protocol_version` int(11) NOT NULL,
+ `capabilities` int(11) NOT NULL,
+ PRIMARY KEY (`cluster_uuid`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+SHOW CREATE TABLE mysql.wsrep_cluster_members;
+Table Create Table
+wsrep_cluster_members CREATE TABLE `wsrep_cluster_members` (
+ `node_uuid` char(36) NOT NULL,
+ `cluster_uuid` char(36) NOT NULL,
+ `node_name` char(32) NOT NULL,
+ `node_incoming_address` varchar(256) NOT NULL,
+ PRIMARY KEY (`node_uuid`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+SELECT @@sql_safe_updates;
+@@sql_safe_updates
+1
+SELECT COUNT(*) = 1 FROM mysql.wsrep_cluster;
+COUNT(*) = 1
+1
+SELECT cluster_uuid = (SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_state_uuid') FROM mysql.wsrep_cluster;
+cluster_uuid = (SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_state_uuid')
+1
+SELECT COUNT(*) = 3 FROM mysql.wsrep_cluster_members;
+COUNT(*) = 3
+1
+SELECT COUNT(*) = (SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size') FROM mysql.wsrep_cluster_members;
+COUNT(*) = (SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size')
+1
+SELECT COUNT(*) = 1 FROM mysql.wsrep_cluster_members WHERE node_uuid = (SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_gcomm_uuid');
+COUNT(*) = 1
+1
+SELECT node_incoming_address LIKE '127.0.0.1:%' from mysql.wsrep_cluster_members;
+node_incoming_address LIKE '127.0.0.1:%'
+1
+1
+1
+SELECT cluster_uuid = (SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_state_uuid') FROM mysql.wsrep_cluster_members;
+cluster_uuid = (SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_state_uuid')
+1
+1
+1
+connection node_2;
+SELECT COUNT(*) = 3 FROM mysql.wsrep_cluster_members;
+COUNT(*) = 3
+1
+SELECT COUNT(*) = (SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size') FROM mysql.wsrep_cluster_members;
+COUNT(*) = (SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size')
+1
+SELECT COUNT(*) = 1 FROM mysql.wsrep_cluster_members WHERE node_uuid = (SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_gcomm_uuid');
+COUNT(*) = 1
+1
+SELECT node_incoming_address LIKE '127.0.0.1:%' from mysql.wsrep_cluster_members;
+node_incoming_address LIKE '127.0.0.1:%'
+1
+1
+1
+SELECT cluster_uuid = (SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_state_uuid') FROM mysql.wsrep_cluster_members;
+cluster_uuid = (SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_state_uuid')
+1
+1
+1
+connection node_3;
+SELECT COUNT(*) = 3 FROM mysql.wsrep_cluster_members;
+COUNT(*) = 3
+1
+SELECT COUNT(*) = (SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size') FROM mysql.wsrep_cluster_members;
+COUNT(*) = (SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size')
+1
+SELECT COUNT(*) = 1 FROM mysql.wsrep_cluster_members WHERE node_uuid = (SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_gcomm_uuid');
+COUNT(*) = 1
+1
+SELECT node_incoming_address LIKE '127.0.0.1:%' from mysql.wsrep_cluster_members;
+node_incoming_address LIKE '127.0.0.1:%'
+1
+1
+1
+SELECT cluster_uuid = (SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_state_uuid') FROM mysql.wsrep_cluster_members;
+cluster_uuid = (SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_state_uuid')
+1
+1
+1
diff --git a/mysql-test/suite/galera_3nodes/t/galera_2_cluster.cnf b/mysql-test/suite/galera_3nodes/t/galera_2_cluster.cnf
new file mode 100644
index 00000000000..3889a4f4fdd
--- /dev/null
+++ b/mysql-test/suite/galera_3nodes/t/galera_2_cluster.cnf
@@ -0,0 +1,25 @@
+!include ../galera_2x3nodes.cnf
+
+[mysqld.1]
+wsrep_gtid_domain_id=1
+server-id=11
+
+[mysqld.2]
+wsrep_gtid_domain_id=1
+server-id=12
+
+[mysqld.3]
+wsrep_gtid_domain_id=1
+server-id=13
+
+[mysqld.4]
+wsrep_gtid_domain_id=2
+server-id=21
+
+[mysqld.5]
+wsrep_gtid_domain_id=2
+server-id=22
+
+[mysqld.6]
+wsrep_gtid_domain_id=2
+server-id=23
\ No newline at end of file
diff --git a/mysql-test/suite/galera_3nodes/t/galera_2_cluster.test b/mysql-test/suite/galera_3nodes/t/galera_2_cluster.test
new file mode 100644
index 00000000000..8a9a74a7252
--- /dev/null
+++ b/mysql-test/suite/galera_3nodes/t/galera_2_cluster.test
@@ -0,0 +1,148 @@
+#
+# This test creates a 2x3 node Galera cluster.
+#
+# A(1) <-> B(2) <-> C(3) {Galera cluster 1}
+# | {Circular Async replication}
+# D(4) <-> E(5) <-> F(6) {Galera cluster 2}
+#
+
+--source include/big_test.inc
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+
+--connect node_5, 127.0.0.1, root, , test, $NODE_MYPORT_5
+
+--connect node_4, 127.0.0.1, root, , test, $NODE_MYPORT_4
+--connection node_4
+
+--replace_result $NODE_MYPORT_1 NODE_MYPORT_1
+--eval CHANGE MASTER TO master_host='127.0.0.1', master_user='root', master_port=$NODE_MYPORT_1, master_use_gtid=current_pos;
+START SLAVE;
+--source include/wait_for_slave_to_start.inc
+
+#
+# CREATE TABLE & INSERT
+#
+
+--connection node_1
+
+CREATE TABLE t1(c1 INTEGER NOT NULL AUTO_INCREMENT PRIMARY KEY, c2 INTEGER);
+INSERT INTO t1(c2) VALUES(1);
+
+--connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3
+--connection node_3
+
+SELECT COUNT(*) = 1 FROM t1;
+
+--connection node_1
+--source include/save_master_gtid.inc
+
+--connection node_4
+--source include/sync_with_master_gtid.inc
+
+SELECT COUNT(*) = 1 FROM t1;
+
+--connect node_6, 127.0.0.1, root, , test, $NODE_MYPORT_6
+--connection node_6
+
+SELECT COUNT(*) = 1 FROM t1;
+
+#
+# ALTER TABLE
+#
+
+--connection node_2
+
+ALTER TABLE t1 ADD COLUMN t3 INTEGER;
+--echo Node 2 column number AFTER ALTER
+SELECT COUNT(*) = 3 FROM information_schema.columns WHERE table_name ='t1';
+
+--connection node_3
+
+--echo Node 3 column number AFTER ALTER
+SELECT COUNT(*) = 3 FROM information_schema.columns WHERE table_name ='t1';
+
+--connection node_1
+--source include/save_master_gtid.inc
+
+--connection node_4
+--source include/sync_with_master_gtid.inc
+
+--echo Node 4 column number AFTER ALTER
+SELECT COUNT(*) = 3 FROM information_schema.columns WHERE table_name ='t1';
+
+--connection node_6
+
+--echo Node 6 column number AFTER ALTER
+SELECT COUNT(*) = 3 FROM information_schema.columns WHERE table_name ='t1';
+
+#
+# OPTIMIZE TABLE
+#
+
+--connection node_2
+
+--let $wsrep_last_committed_before = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME = 'wsrep_last_committed'`
+OPTIMIZE TABLE t1;
+
+--connection node_1
+
+--let $wait_condition = SELECT VARIABLE_VALUE >= $wsrep_last_committed_before + 1 FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME = 'wsrep_last_committed'
+--source include/wait_condition.inc
+
+--connection node_4
+
+--let $wait_condition = SELECT VARIABLE_VALUE >= $wsrep_last_committed_before + 1 FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME = 'wsrep_last_committed'
+--source include/wait_condition.inc
+
+--connection node_6
+
+--let $wait_condition = SELECT VARIABLE_VALUE >= $wsrep_last_committed_before + 1 FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME = 'wsrep_last_committed'
+--source include/wait_condition.inc
+
+#
+# Cleanup
+#
+
+--connection node_1
+
+DROP TABLE t1;
+
+--connection node_4
+
+STOP SLAVE;
+RESET SLAVE;
+SET GLOBAL wsrep_on = OFF;
+RESET MASTER;
+SET GLOBAL wsrep_on = ON;
+SET GLOBAL GTID_SLAVE_POS="";
+
+--connection node_1
+
+SET GLOBAL wsrep_on = OFF;
+RESET MASTER;
+SET GLOBAL wsrep_on = ON;
+
+--connection node_2
+
+SET GLOBAL wsrep_on = OFF;
+RESET MASTER;
+SET GLOBAL wsrep_on = ON;
+
+--connection node_3
+
+SET GLOBAL wsrep_on = OFF;
+RESET MASTER;
+SET GLOBAL wsrep_on = ON;
+
+--connection node_5
+
+SET GLOBAL wsrep_on = OFF;
+RESET MASTER;
+SET GLOBAL wsrep_on = ON;
+
+--connection node_6
+
+SET GLOBAL wsrep_on = OFF;
+RESET MASTER;
+SET GLOBAL wsrep_on = ON;
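The save_master_gtid.inc / sync_with_master_gtid.inc pair used in this test boils down to reading the master's binlog GTID position and waiting for it on the slave. A rough SQL-level equivalent; the GTID value '1-11-42' and the 120-second timeout are arbitrary examples, not values from the test:

-- on the master (node_1)
SELECT @@gtid_binlog_pos;
-- on the slave (node_4), using the position read above
SELECT MASTER_GTID_WAIT('1-11-42', 120);  -- returns 0 once the position is reached, -1 on timeout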
diff --git a/mysql-test/suite/galera_3nodes/t/galera_ipv6_mysqldump.test b/mysql-test/suite/galera_3nodes/t/galera_ipv6_mysqldump.test
index f5dd1aeb06d..2f8bf07b5b5 100644
--- a/mysql-test/suite/galera_3nodes/t/galera_ipv6_mysqldump.test
+++ b/mysql-test/suite/galera_3nodes/t/galera_ipv6_mysqldump.test
@@ -37,7 +37,7 @@ SET GLOBAL wsrep_sst_method = 'mysqldump';
#
#--connection node_2
-#--source suite/galera/include/galera_unload_provider.inc
+#--source suite/galera/include/galera_stop_replication.inc
--echo Shutting down server ...
--source include/shutdown_mysqld.inc
@@ -63,7 +63,7 @@ INSERT INTO t1 VALUES (1);
let $restart_noprint=2;
--source include/start_mysqld.inc
-#--source suite/galera/include/galera_load_provider.inc
+#--source suite/galera/include/galera_start_replication.inc
--let $wait_condition = SELECT VARIABLE_VALUE = 3 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'
--source include/wait_condition.inc
diff --git a/mysql-test/suite/galera_3nodes/t/galera_ist_gcache_rollover.test b/mysql-test/suite/galera_3nodes/t/galera_ist_gcache_rollover.test
index 69e80ee6c3d..b77a810f37d 100644
--- a/mysql-test/suite/galera_3nodes/t/galera_ist_gcache_rollover.test
+++ b/mysql-test/suite/galera_3nodes/t/galera_ist_gcache_rollover.test
@@ -29,10 +29,10 @@ INSERT INTO t1 VALUES (01), (02), (03), (04), (05);
# Disconnect nodes #2 and #3
--connection node_2
---source suite/galera/include/galera_unload_provider.inc
+--source suite/galera/include/galera_stop_replication.inc
--connection node_3
---source suite/galera/include/galera_unload_provider.inc
+--source suite/galera/include/galera_stop_replication.inc
--connection node_1
--source include/wait_until_connected_again.inc
diff --git a/mysql-test/suite/galera_3nodes/t/galera_ssl_reload.cnf b/mysql-test/suite/galera_3nodes/t/galera_ssl_reload.cnf
new file mode 100644
index 00000000000..65d1599268d
--- /dev/null
+++ b/mysql-test/suite/galera_3nodes/t/galera_ssl_reload.cnf
@@ -0,0 +1,10 @@
+!include ../galera_3nodes.cnf
+
+[mysqld.1]
+wsrep_provider_options='base_port=@mysqld.1.#galera_port;socket.ssl=yes;socket.ssl_ca=@ENV.MYSQL_TEST_DIR/std_data/galera_certs/galera.root.crt;socket.ssl_cert=@ENV.MYSQL_TEST_DIR/std_data/galera_certs/galera.1.crt;socket.ssl_key=@ENV.MYSQL_TEST_DIR/std_data/galera_certs/galera.1.key'
+
+[mysqld.2]
+wsrep_provider_options='base_port=@mysqld.2.#galera_port;socket.ssl=yes;socket.ssl_ca=@ENV.MYSQL_TEST_DIR/std_data/galera_certs/galera.root.crt;socket.ssl_cert=@ENV.MYSQL_TEST_DIR/std_data/galera_certs/galera.1.crt;socket.ssl_key=@ENV.MYSQL_TEST_DIR/std_data/galera_certs/galera.1.key'
+
+[mysqld.3]
+wsrep_provider_options='base_port=@mysqld.3.#galera_port;socket.ssl=yes;socket.ssl_ca=@ENV.MYSQL_TEST_DIR/std_data/galera_certs/galera.root.crt;socket.ssl_cert=@ENV.MYSQL_TEST_DIR/std_data/galera_certs/galera.1.crt;socket.ssl_key=@ENV.MYSQL_TEST_DIR/std_data/galera_certs/galera.1.key'
diff --git a/mysql-test/suite/galera_3nodes/t/galera_ssl_reload.test b/mysql-test/suite/galera_3nodes/t/galera_ssl_reload.test
new file mode 100644
index 00000000000..0c22a58b3e9
--- /dev/null
+++ b/mysql-test/suite/galera_3nodes/t/galera_ssl_reload.test
@@ -0,0 +1,67 @@
+#
+# Test reloading of Galera SSL certificate without shutting down node
+#
+
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+
+--connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3
+
+# Save original auto_increment_offset values.
+--let $node_1=node_1
+--let $node_2=node_2
+--let $node_3=node_3
+--source ../galera/include/auto_increment_offset_save.inc
+
+# Setup galera ports
+--connection node_1
+--source suite/galera/include/galera_base_port.inc
+--let $NODE_GALERAPORT_1 = $_NODE_GALERAPORT
+
+--connection node_2
+--source suite/galera/include/galera_base_port.inc
+--let $NODE_GALERAPORT_2 = $_NODE_GALERAPORT
+
+# Setup temporary file for SSL reloading
+let $ssl_cert = $MYSQLTEST_VARDIR/tmp/ssl_cert.pem;
+let $ssl_key = $MYSQLTEST_VARDIR/tmp/ssl_key.pem;
+let $ssl_ca = $MYSQLTEST_VARDIR/tmp/ssl_ca.pem;
+
+copy_file std_data/galera_certs/galera.root.crt $ssl_ca;
+copy_file std_data/galera_certs/galera.1.crt $ssl_cert;
+copy_file std_data/galera_certs/galera.1.key $ssl_key;
+
+--connection node_2
+--source include/shutdown_mysqld.inc
+--let $restart_noprint=1
+--let $restart_parameters = --wsrep_cluster_address=gcomm://127.0.0.1:$NODE_GALERAPORT_1 --wsrep_provider_options=base_port=$NODE_GALERAPORT_2;socket.ssl=yes;socket.ssl_ca=$MYSQL_TEST_DIR/std_data/galera_certs/galera.root.crt;socket.ssl_cert=$MYSQLTEST_VARDIR/tmp/ssl_cert.pem;socket.ssl_key=$MYSQLTEST_VARDIR/tmp/ssl_key.pem
+--source include/start_mysqld.inc
+--source include/galera_wait_ready.inc
+
+# Set certificate and key and reload by directly setting `wsrep_provider_options`
+remove_file $ssl_cert;
+remove_file $ssl_key;
+copy_file std_data/galera_certs/galera.2.crt $ssl_cert;
+copy_file std_data/galera_certs/galera.2.key $ssl_key;
+SET GLOBAL wsrep_provider_options = 'socket.ssl_reload=1';
+
+--connection node_3
+--source include/shutdown_mysqld.inc
+--let $restart_parameters = --wsrep_cluster_address=gcomm://127.0.0.1:$NODE_GALERAPORT_2
+--source include/start_mysqld.inc
+
+# Set certificate and key and reload by executing `FLUSH SSL`
+--connection node_2
+remove_file $ssl_cert;
+remove_file $ssl_key;
+copy_file std_data/galera_certs/galera.1.crt $ssl_cert;
+copy_file std_data/galera_certs/galera.1.key $ssl_key;
+FLUSH SSL;
+
+# Cleanup
+remove_file $ssl_ca;
+remove_file $ssl_cert;
+remove_file $ssl_key;
+
+# Restore original auto_increment_offset values.
+--source ../galera/include/auto_increment_offset_restore.inc
diff --git a/mysql-test/suite/galera_3nodes/t/galera_wsrep_schema_init.cnf b/mysql-test/suite/galera_3nodes/t/galera_wsrep_schema_init.cnf
new file mode 100644
index 00000000000..317094cea72
--- /dev/null
+++ b/mysql-test/suite/galera_3nodes/t/galera_wsrep_schema_init.cnf
@@ -0,0 +1,5 @@
+!include ../galera_3nodes.cnf
+
+[mysqld]
+sql-safe-updates=1
+wsrep-debug=1
diff --git a/mysql-test/suite/galera_3nodes/t/galera_wsrep_schema_init.test b/mysql-test/suite/galera_3nodes/t/galera_wsrep_schema_init.test
new file mode 100644
index 00000000000..7d8089a8ceb
--- /dev/null
+++ b/mysql-test/suite/galera_3nodes/t/galera_wsrep_schema_init.test
@@ -0,0 +1,58 @@
+#
+# This test performs basic checks on the contents of the wsrep_schema
+#
+# wsrep_members_history checks are temporarily disabled until it
+# can be made configurable.
+#
+
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+
+--connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3
+--connection node_1
+# Save original auto_increment_offset values.
+--let $node_1=node_1
+--let $node_2=node_2
+--let $node_3=node_3
+--source ../galera/include/auto_increment_offset_save.inc
+
+# Make the test fail if table structure has changed
+
+SHOW CREATE TABLE mysql.wsrep_cluster;
+SHOW CREATE TABLE mysql.wsrep_cluster_members;
+#disabled SHOW CREATE TABLE mysql.wsrep_member_history;
+SELECT @@sql_safe_updates;
+
+# Checks for the wsrep_cluster table
+
+SELECT COUNT(*) = 1 FROM mysql.wsrep_cluster;
+SELECT cluster_uuid = (SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_state_uuid') FROM mysql.wsrep_cluster;
+
+# Checks for the wsrep_cluster_members table
+
+SELECT COUNT(*) = 3 FROM mysql.wsrep_cluster_members;
+SELECT COUNT(*) = (SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size') FROM mysql.wsrep_cluster_members;
+SELECT COUNT(*) = 1 FROM mysql.wsrep_cluster_members WHERE node_uuid = (SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_gcomm_uuid');
+
+SELECT node_incoming_address LIKE '127.0.0.1:%' from mysql.wsrep_cluster_members;
+SELECT cluster_uuid = (SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_state_uuid') FROM mysql.wsrep_cluster_members;
+
+--connection node_2
+
+SELECT COUNT(*) = 3 FROM mysql.wsrep_cluster_members;
+SELECT COUNT(*) = (SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size') FROM mysql.wsrep_cluster_members;
+SELECT COUNT(*) = 1 FROM mysql.wsrep_cluster_members WHERE node_uuid = (SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_gcomm_uuid');
+
+SELECT node_incoming_address LIKE '127.0.0.1:%' from mysql.wsrep_cluster_members;
+SELECT cluster_uuid = (SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_state_uuid') FROM mysql.wsrep_cluster_members;
+
+--connection node_3
+SELECT COUNT(*) = 3 FROM mysql.wsrep_cluster_members;
+SELECT COUNT(*) = (SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size') FROM mysql.wsrep_cluster_members;
+SELECT COUNT(*) = 1 FROM mysql.wsrep_cluster_members WHERE node_uuid = (SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_gcomm_uuid');
+
+SELECT node_incoming_address LIKE '127.0.0.1:%' from mysql.wsrep_cluster_members;
+SELECT cluster_uuid = (SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_state_uuid') FROM mysql.wsrep_cluster_members;
+
+--source ../galera/include/auto_increment_offset_restore.inc
+
diff --git a/mysql-test/suite/galera_sr/disabled.def b/mysql-test/suite/galera_sr/disabled.def
index c0e5857d6bc..d52ee32f610 100644
--- a/mysql-test/suite/galera_sr/disabled.def
+++ b/mysql-test/suite/galera_sr/disabled.def
@@ -11,5 +11,4 @@
##############################################################################
GCF-1060 : MDEV-20848 galera_sr.GCF_1060
-galera-features#56 : MDEV-18542 galera_sr.galera-features#56
-
+galera-features#56 : MDEV-24896
diff --git a/mysql-test/suite/galera_sr/r/MDEV-25226.result b/mysql-test/suite/galera_sr/r/MDEV-25226.result
new file mode 100644
index 00000000000..4699023562d
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/MDEV-25226.result
@@ -0,0 +1,24 @@
+connection node_2;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY);
+SET SESSION wsrep_trx_fragment_size=1;
+START TRANSACTION;
+INSERT INTO t1 VALUES(1);
+SET SESSION wsrep_on=OFF;
+ERROR 25000: You are not allowed to execute this command in a transaction
+SET GLOBAL wsrep_on=OFF;
+ERROR 25000: You are not allowed to execute this command in a transaction
+INSERT INTO t1 VALUES(2);
+COMMIT;
+connection node_1;
+SELECT * FROM t1;
+f1
+1
+2
+connection node_2;
+SELECT * FROM t1;
+f1
+1
+2
+connection node_1;
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/r/galera-features#56.result b/mysql-test/suite/galera_sr/r/galera-features#56.result
index a4264739cbf..15fcb475acb 100644
--- a/mysql-test/suite/galera_sr/r/galera-features#56.result
+++ b/mysql-test/suite/galera_sr/r/galera-features#56.result
@@ -18,21 +18,21 @@ set session wsrep_sync_wait=0;
SET GLOBAL wsrep_slave_threads = 4;
SET SESSION wsrep_trx_fragment_size = 1;
connection node_1;
-INSERT INTO t1 (f2) SELECT 1 FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4;;
+INSERT INTO t1 (f2) SELECT 1 FROM ten AS a1, ten AS a2;;
connection node_1a;
-INSERT INTO t1 (f2) SELECT 1 FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4;;
+INSERT INTO t1 (f2) SELECT 1 FROM ten AS a1, ten AS a2;;
connection node_2;
-INSERT INTO t1 (f2) SELECT 1 FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4;;
+INSERT INTO t1 (f2) SELECT 1 FROM ten AS a1, ten AS a2;;
connection node_1;
connection node_1a;
connection node_2;
set session wsrep_sync_wait=15;
SELECT COUNT(*) FROM t1;
COUNT(*)
-30000
+300
SELECT COUNT(DISTINCT f1) FROM t1;
COUNT(DISTINCT f1)
-30000
+300
connection default;
DROP TABLE t1;
DROP TABLE ten;
diff --git a/mysql-test/suite/galera_sr/t/GCF-900.test b/mysql-test/suite/galera_sr/t/GCF-900.test
index 3f1b53630b6..b44423c5013 100644
--- a/mysql-test/suite/galera_sr/t/GCF-900.test
+++ b/mysql-test/suite/galera_sr/t/GCF-900.test
@@ -15,6 +15,9 @@ START TRANSACTION;
INSERT INTO t1 VALUES (2, 0);
--connection node_2
+--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.INNODB_SYS_TABLES WHERE NAME LIKE 'test/t1';
+--source include/wait_condition.inc
+
ALTER TABLE t1 DROP COLUMN f2;
--connection node_1
diff --git a/mysql-test/suite/galera_sr/t/MDEV-25226.test b/mysql-test/suite/galera_sr/t/MDEV-25226.test
new file mode 100644
index 00000000000..3d19a0b9675
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/MDEV-25226.test
@@ -0,0 +1,33 @@
+#
+# MDEV-25226 - Test the case where wsrep_on is set OFF
+# on a transaction that has already replicated a fragment.
+#
+# This would cause: Assertion `transaction_.active() == false ||
+# (transaction_.state() == wsrep::transaction::s_executing ||
+# transaction_.state() == wsrep::transaction::s_prepared ||
+# transaction_.state() == wsrep::transaction::s_aborted ||
+# transaction_.state() == wsrep::transaction::s_must_abort)'
+#
+
+--source include/galera_cluster.inc
+
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY);
+
+SET SESSION wsrep_trx_fragment_size=1;
+START TRANSACTION;
+INSERT INTO t1 VALUES(1);
+--error ER_CANT_DO_THIS_DURING_AN_TRANSACTION
+SET SESSION wsrep_on=OFF;
+--error ER_CANT_DO_THIS_DURING_AN_TRANSACTION
+SET GLOBAL wsrep_on=OFF;
+INSERT INTO t1 VALUES(2);
+COMMIT;
+
+--connection node_1
+SELECT * FROM t1;
+
+--connection node_2
+SELECT * FROM t1;
+
+--connection node_1
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/t/galera-features#56.test b/mysql-test/suite/galera_sr/t/galera-features#56.test
index 4d46a3bf853..ac73c2efe3d 100644
--- a/mysql-test/suite/galera_sr/t/galera-features#56.test
+++ b/mysql-test/suite/galera_sr/t/galera-features#56.test
@@ -3,8 +3,6 @@
##
--source include/galera_cluster.inc
---source include/have_innodb.inc
---source include/big_test.inc
# Create a second connection to node1 so that we can run transactions concurrently
--let $galera_connection_name = node_1a
@@ -19,7 +17,6 @@ INSERT INTO ten VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10);
CREATE TABLE t1 (f1 INTEGER AUTO_INCREMENT PRIMARY KEY, f2 INTEGER) Engine=InnoDB;
SET SESSION wsrep_trx_fragment_size = 1;
-
--connection node_2
set session wsrep_sync_wait=15;
SELECT COUNT(*) from ten;
@@ -35,13 +32,13 @@ SET GLOBAL wsrep_slave_threads = 4;
SET SESSION wsrep_trx_fragment_size = 1;
--connection node_1
---send INSERT INTO t1 (f2) SELECT 1 FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4;
+--send INSERT INTO t1 (f2) SELECT 1 FROM ten AS a1, ten AS a2;
--connection node_1a
---send INSERT INTO t1 (f2) SELECT 1 FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4;
+--send INSERT INTO t1 (f2) SELECT 1 FROM ten AS a1, ten AS a2;
--connection node_2
---send INSERT INTO t1 (f2) SELECT 1 FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4;
+--send INSERT INTO t1 (f2) SELECT 1 FROM ten AS a1, ten AS a2;
--connection node_1
--reap
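The new row counts in the updated result file follow directly from the cross join: the helper table ten holds 10 rows, so each added join factor multiplies the inserted row count by 10.

SELECT COUNT(*) FROM ten AS a1, ten AS a2;  -- 100 rows per INSERT .. SELECT
-- three concurrent connections x 100 rows = 300 total,
-- versus 3 x 10^4 = 30000 with the original four-way join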
diff --git a/mysql-test/suite/gcol/inc/gcol_column_def_options.inc b/mysql-test/suite/gcol/inc/gcol_column_def_options.inc
index 28c854c44f4..f4350d25ae9 100644
--- a/mysql-test/suite/gcol/inc/gcol_column_def_options.inc
+++ b/mysql-test/suite/gcol/inc/gcol_column_def_options.inc
@@ -343,11 +343,12 @@ DELETE FROM t1 WHERE c=1;
DROP TABLE t1;
}
---error ER_PARSE_ERROR
CREATE TABLE t1 (c CHAR(10) CHARACTER SET utf8 COLLATE utf8_bin GENERATED ALWAYS AS ("foo bar"));
+SHOW CREATE TABLE t1;
+DROP TABLE t1;
CREATE TABLE t1 (i INT);
---error ER_PARSE_ERROR
ALTER TABLE t1 ADD COLUMN c CHAR(10) CHARACTER SET utf8 COLLATE utf8_bin GENERATED ALWAYS AS ("foo bar");
+SHOW CREATE TABLE t1;
DROP TABLE t1;
--error ER_PARSE_ERROR
CREATE TABLE t1 (i INT COLLATE utf8_bin, c INT COLLATE utf8_bin GENERATED ALWAYS AS (10));
diff --git a/mysql-test/suite/gcol/r/gcol_column_def_options_innodb.result b/mysql-test/suite/gcol/r/gcol_column_def_options_innodb.result
index a98652248f8..6da3f3c14d3 100644
--- a/mysql-test/suite/gcol/r/gcol_column_def_options_innodb.result
+++ b/mysql-test/suite/gcol/r/gcol_column_def_options_innodb.result
@@ -426,10 +426,20 @@ INSERT INTO t1(a) VALUES(0);
DELETE FROM t1 WHERE c=1;
DROP TABLE t1;
CREATE TABLE t1 (c CHAR(10) CHARACTER SET utf8 COLLATE utf8_bin GENERATED ALWAYS AS ("foo bar"));
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'GENERATED ALWAYS AS ("foo bar"))' at line 1
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c` char(10) CHARACTER SET utf8 COLLATE utf8_bin GENERATED ALWAYS AS ('foo bar') VIRTUAL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+DROP TABLE t1;
CREATE TABLE t1 (i INT);
ALTER TABLE t1 ADD COLUMN c CHAR(10) CHARACTER SET utf8 COLLATE utf8_bin GENERATED ALWAYS AS ("foo bar");
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'GENERATED ALWAYS AS ("foo bar")' at line 1
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `i` int(11) DEFAULT NULL,
+ `c` char(10) CHARACTER SET utf8 COLLATE utf8_bin GENERATED ALWAYS AS ('foo bar') VIRTUAL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
DROP TABLE t1;
CREATE TABLE t1 (i INT COLLATE utf8_bin, c INT COLLATE utf8_bin GENERATED ALWAYS AS (10));
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'GENERATED ALWAYS AS (10))' at line 1
diff --git a/mysql-test/suite/gcol/r/gcol_column_def_options_myisam.result b/mysql-test/suite/gcol/r/gcol_column_def_options_myisam.result
index 82a879be3f7..0d7aaeab1fa 100644
--- a/mysql-test/suite/gcol/r/gcol_column_def_options_myisam.result
+++ b/mysql-test/suite/gcol/r/gcol_column_def_options_myisam.result
@@ -426,10 +426,20 @@ INSERT INTO t1(a) VALUES(0);
DELETE FROM t1 WHERE c=1;
DROP TABLE t1;
CREATE TABLE t1 (c CHAR(10) CHARACTER SET utf8 COLLATE utf8_bin GENERATED ALWAYS AS ("foo bar"));
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'GENERATED ALWAYS AS ("foo bar"))' at line 1
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c` char(10) CHARACTER SET utf8 COLLATE utf8_bin GENERATED ALWAYS AS ('foo bar') VIRTUAL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+DROP TABLE t1;
CREATE TABLE t1 (i INT);
ALTER TABLE t1 ADD COLUMN c CHAR(10) CHARACTER SET utf8 COLLATE utf8_bin GENERATED ALWAYS AS ("foo bar");
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'GENERATED ALWAYS AS ("foo bar")' at line 1
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `i` int(11) DEFAULT NULL,
+ `c` char(10) CHARACTER SET utf8 COLLATE utf8_bin GENERATED ALWAYS AS ('foo bar') VIRTUAL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
DROP TABLE t1;
CREATE TABLE t1 (i INT COLLATE utf8_bin, c INT COLLATE utf8_bin GENERATED ALWAYS AS (10));
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'GENERATED ALWAYS AS (10))' at line 1
diff --git a/mysql-test/suite/gcol/r/innodb_virtual_fk.result b/mysql-test/suite/gcol/r/innodb_virtual_fk.result
index 50685e04a69..252274f3e0a 100644
--- a/mysql-test/suite/gcol/r/innodb_virtual_fk.result
+++ b/mysql-test/suite/gcol/r/innodb_virtual_fk.result
@@ -790,3 +790,36 @@ t1 CREATE TABLE `t1` (
ALTER TABLE t1 DROP INDEX f1;
ALTER TABLE t1 DROP f3;
DROP TABLE t1;
+#
+# MDEV-24041 Generated column DELETE with FOREIGN KEY crash InnoDB
+#
+SET FOREIGN_KEY_CHECKS=1;
+CREATE DATABASE `a-b`;
+USE `a-b`;
+CREATE TABLE emails (
+id int,
+PRIMARY KEY (id)
+) ENGINE=InnoDB;
+CREATE TABLE email_stats (
+id int,
+email_id int,
+date_sent char(4),
+generated_email_id int as (email_id),
+#generated_sent_date DATE GENERATED ALWAYS AS (date_sent),
+PRIMARY KEY (id),
+KEY mautic_generated_sent_date_email_id (generated_email_id),
+FOREIGN KEY (email_id) REFERENCES emails (id) ON DELETE SET NULL
+) ENGINE=InnoDB;
+CREATE TABLE emails_metadata (
+email_id int,
+PRIMARY KEY (email_id),
+CONSTRAINT FK FOREIGN KEY (email_id) REFERENCES emails (id) ON DELETE CASCADE
+) ENGINE=InnoDB;
+INSERT INTO emails VALUES (1);
+INSERT INTO email_stats (id, email_id, date_sent) VALUES (1,1,'Jan');
+INSERT INTO emails_metadata VALUES (1);
+DELETE FROM emails;
+DROP TABLE email_stats;
+DROP TABLE emails_metadata;
+DROP TABLE emails;
+DROP DATABASE `a-b`;
diff --git a/mysql-test/suite/gcol/r/virtual_index_drop.result b/mysql-test/suite/gcol/r/virtual_index_drop.result
new file mode 100644
index 00000000000..012e61be459
--- /dev/null
+++ b/mysql-test/suite/gcol/r/virtual_index_drop.result
@@ -0,0 +1,69 @@
+#
+# MDEV-24971 InnoDB access freed virtual column
+# after rollback of secondary index
+#
+CREATE TABLE t1(f1 INT, f2 INT AS (f1 + 2) VIRTUAL)ENGINE=InnoDB;
+INSERT INTO t1(f1) VALUES(1), (1);
+ALTER TABLE t1 ADD UNIQUE INDEX(f2), ALGORITHM=INPLACE, LOCK=EXCLUSIVE;
+ERROR 23000: Duplicate entry '3' for key 'f2'
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `f1` int(11) DEFAULT NULL,
+ `f2` int(11) GENERATED ALWAYS AS (`f1` + 2) VIRTUAL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+DROP TABLE t1;
+CREATE TABLE t1(f1 INT, f2 INT AS (f1 + 2) VIRTUAL)ENGINE=InnoDB;
+INSERT INTO t1(f1) VALUES(1), (1);
+ALTER TABLE t1 ADD UNIQUE INDEX(f2), ALGORITHM=INPLACE, LOCK=SHARED;
+ERROR 23000: Duplicate entry '3' for key 'f2'
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `f1` int(11) DEFAULT NULL,
+ `f2` int(11) GENERATED ALWAYS AS (`f1` + 2) VIRTUAL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+DROP TABLE t1;
+CREATE TABLE t1(f1 INT, f2 INT AS (f1) VIRTUAL)ENGINE=InnoDB;
+SET DEBUG_DBUG="+d,create_index_fail";
+SET DEBUG_SYNC="innodb_inplace_alter_table_enter SIGNAL con1_go WAIT_FOR alter_signal";
+ALTER TABLE t1 ADD COLUMN f3 INT AS (f1) VIRTUAL, ADD INDEX(f2, f3);
+connect con1,localhost,root,,,;
+SET DEBUG_SYNC="now WAIT_FOR con1_go";
+BEGIN;
+SELECT * FROM t1;
+f1 f2
+SET DEBUG_SYNC="now SIGNAL alter_signal";
+connection default;
+ERROR 23000: Duplicate entry '' for key '*UNKNOWN*'
+connection con1;
+rollback;
+connection default;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `f1` int(11) DEFAULT NULL,
+ `f2` int(11) GENERATED ALWAYS AS (`f1`) VIRTUAL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+DROP TABLE t1;
+CREATE TABLE t1(f1 INT, f2 INT AS (f1) VIRTUAL)ENGINE=InnoDB;
+SET DEBUG_DBUG="+d,create_index_fail";
+SET DEBUG_SYNC="innodb_inplace_alter_table_enter SIGNAL con1_go WAIT_FOR alter_signal";
+ALTER TABLE t1 ADD INDEX(f2);
+connection con1;
+SET DEBUG_SYNC="now WAIT_FOR con1_go";
+BEGIN;
+INSERT INTO t1(f1) VALUES(1);
+SET DEBUG_SYNC="now SIGNAL alter_signal";
+connection default;
+ERROR 23000: Duplicate entry '' for key '*UNKNOWN*'
+connection con1;
+rollback;
+connection default;
+disconnect con1;
+DROP TABLE t1;
+CREATE TABLE t1(f1 CHAR(100), f2 CHAR(100) as (f1) VIRTUAL)ENGINE=InnoDB;
+ALTER TABLE t1 ADD COLUMN f3 CHAR(100) AS (f2) VIRTUAL, ADD INDEX(f3(10), f1, f3(12));
+ERROR 42S21: Duplicate column name 'f3'
+DROP TABLE t1;
+SET DEBUG_SYNC=RESET;
diff --git a/mysql-test/suite/gcol/t/innodb_virtual_fk.test b/mysql-test/suite/gcol/t/innodb_virtual_fk.test
index 23d3ee97290..24b6a4631e6 100644
--- a/mysql-test/suite/gcol/t/innodb_virtual_fk.test
+++ b/mysql-test/suite/gcol/t/innodb_virtual_fk.test
@@ -649,3 +649,44 @@ SHOW CREATE TABLE t1;
ALTER TABLE t1 DROP INDEX f1;
ALTER TABLE t1 DROP f3;
DROP TABLE t1;
+
+--echo #
+--echo # MDEV-24041 Generated column DELETE with FOREIGN KEY crash InnoDB
+--echo #
+SET FOREIGN_KEY_CHECKS=1;
+CREATE DATABASE `a-b`;
+USE `a-b`;
+CREATE TABLE emails (
+ id int,
+ PRIMARY KEY (id)
+) ENGINE=InnoDB;
+
+CREATE TABLE email_stats (
+ id int,
+ email_id int,
+ date_sent char(4),
+ generated_email_id int as (email_id),
+ #generated_sent_date DATE GENERATED ALWAYS AS (date_sent),
+ PRIMARY KEY (id),
+ KEY mautic_generated_sent_date_email_id (generated_email_id),
+ FOREIGN KEY (email_id) REFERENCES emails (id) ON DELETE SET NULL
+) ENGINE=InnoDB;
+
+
+CREATE TABLE emails_metadata (
+ email_id int,
+ PRIMARY KEY (email_id),
+ CONSTRAINT FK FOREIGN KEY (email_id) REFERENCES emails (id) ON DELETE CASCADE
+) ENGINE=InnoDB;
+
+
+INSERT INTO emails VALUES (1);
+INSERT INTO email_stats (id, email_id, date_sent) VALUES (1,1,'Jan');
+INSERT INTO emails_metadata VALUES (1);
+
+DELETE FROM emails;
+
+DROP TABLE email_stats;
+DROP TABLE emails_metadata;
+DROP TABLE emails;
+DROP DATABASE `a-b`;
diff --git a/mysql-test/suite/gcol/t/virtual_index_drop.test b/mysql-test/suite/gcol/t/virtual_index_drop.test
new file mode 100644
index 00000000000..016832b9e6d
--- /dev/null
+++ b/mysql-test/suite/gcol/t/virtual_index_drop.test
@@ -0,0 +1,71 @@
+--source include/have_innodb.inc
+--source include/have_debug.inc
+
+--echo #
+--echo # MDEV-24971 InnoDB access freed virtual column
+--echo # after rollback of secondary index
+--echo #
+
+# Exclusive lock must not defer the index removal
+
+CREATE TABLE t1(f1 INT, f2 INT AS (f1 + 2) VIRTUAL)ENGINE=InnoDB;
+INSERT INTO t1(f1) VALUES(1), (1);
+--error ER_DUP_ENTRY
+ALTER TABLE t1 ADD UNIQUE INDEX(f2), ALGORITHM=INPLACE, LOCK=EXCLUSIVE;
+SHOW CREATE TABLE t1;
+DROP TABLE t1;
+
+# If the lock is SHARED and the table doesn't have any other open handle,
+# then InnoDB must not defer the index removal
+
+CREATE TABLE t1(f1 INT, f2 INT AS (f1 + 2) VIRTUAL)ENGINE=InnoDB;
+INSERT INTO t1(f1) VALUES(1), (1);
+--error ER_DUP_ENTRY
+ALTER TABLE t1 ADD UNIQUE INDEX(f2), ALGORITHM=INPLACE, LOCK=SHARED;
+SHOW CREATE TABLE t1;
+DROP TABLE t1;
+
+# InnoDB should store the newly dropped virtual column in
+# new_vcol_info of the index when the ALTER is rolled back
+
+CREATE TABLE t1(f1 INT, f2 INT AS (f1) VIRTUAL)ENGINE=InnoDB;
+SET DEBUG_DBUG="+d,create_index_fail";
+SET DEBUG_SYNC="innodb_inplace_alter_table_enter SIGNAL con1_go WAIT_FOR alter_signal";
+SEND ALTER TABLE t1 ADD COLUMN f3 INT AS (f1) VIRTUAL, ADD INDEX(f2, f3);
+connect(con1,localhost,root,,,);
+SET DEBUG_SYNC="now WAIT_FOR con1_go";
+BEGIN;
+SELECT * FROM t1;
+SET DEBUG_SYNC="now SIGNAL alter_signal";
+connection default;
+--error ER_DUP_ENTRY
+reap;
+connection con1;
+rollback;
+connection default;
+SHOW CREATE TABLE t1;
+DROP TABLE t1;
+
+CREATE TABLE t1(f1 INT, f2 INT AS (f1) VIRTUAL)ENGINE=InnoDB;
+SET DEBUG_DBUG="+d,create_index_fail";
+SET DEBUG_SYNC="innodb_inplace_alter_table_enter SIGNAL con1_go WAIT_FOR alter_signal";
+send ALTER TABLE t1 ADD INDEX(f2);
+connection con1;
+SET DEBUG_SYNC="now WAIT_FOR con1_go";
+BEGIN;
+INSERT INTO t1(f1) VALUES(1);
+SET DEBUG_SYNC="now SIGNAL alter_signal";
+connection default;
+--error ER_DUP_ENTRY
+reap;
+connection con1;
+rollback;
+connection default;
+disconnect con1;
+DROP TABLE t1;
+
+CREATE TABLE t1(f1 CHAR(100), f2 CHAR(100) as (f1) VIRTUAL)ENGINE=InnoDB;
+--error ER_DUP_FIELDNAME
+ALTER TABLE t1 ADD COLUMN f3 CHAR(100) AS (f2) VIRTUAL, ADD INDEX(f3(10), f1, f3(12));
+DROP TABLE t1;
+SET DEBUG_SYNC=RESET;
diff --git a/mysql-test/suite/innodb/include/innodb_simulate_comp_failures.inc b/mysql-test/suite/innodb/include/innodb_simulate_comp_failures.inc
deleted file mode 100644
index d9c0294faf5..00000000000
--- a/mysql-test/suite/innodb/include/innodb_simulate_comp_failures.inc
+++ /dev/null
@@ -1,152 +0,0 @@
---echo #
---echo # Testing robustness against random compression failures
---echo #
-
---source include/not_embedded.inc
---source include/have_innodb.inc
-
---let $simulate_comp_failures_save = `SELECT @@innodb_simulate_comp_failures`
-
---disable_query_log
-call mtr.add_suppression("InnoDB: Simulating a compression failure for table `test`\\.`t1`");
---enable_query_log
-
-# create the table with compressed pages of size 8K.
-CREATE TABLE t1(id INT AUTO_INCREMENT PRIMARY KEY, msg VARCHAR(255), KEY msg_i(msg)) ENGINE=INNODB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8;
-
-SHOW CREATE TABLE t1;
-
-# percentage of compressions that will be forced to fail
-SET GLOBAL innodb_simulate_comp_failures = 25;
-
---disable_query_log
---disable_result_log
-
-let $num_inserts_ind = $num_inserts;
-let $commit_iterations=50;
-
-while ($num_inserts_ind)
-{
- let $repeat = `select floor(rand() * 10)`;
- eval INSERT INTO t1(id, msg)
- VALUES ($num_inserts_ind, REPEAT('abcdefghijklmnopqrstuvwxyz', $repeat));
- dec $num_inserts_ind;
-}
-
---enable_query_log
---enable_result_log
-
-COMMIT;
-SELECT COUNT(id) FROM t1;
-
---disable_query_log
---disable_result_log
-
-# do random ops, making sure that some pages will get fragmented and reorganized.
-let $num_ops_ind = $num_ops;
-let $commit_count= $commit_iterations;
-
-BEGIN;
-
-while($num_ops_ind)
-{
- let $idx = `select floor(rand()*$num_inserts)`;
- let $insert_or_update = `select floor(rand()*3)`;
-
- let $repeat = `select floor(rand() * 9) + 1`;
-
- let $msg = query_get_value(`select repeat('abcdefghijklmnopqrstuvwxyz', $repeat) as x`, x, 1);
-
- let $single_or_multi = `select floor(rand()*10)`;
-
- if ($insert_or_update)
- {
- let $cnt = query_get_value(SELECT COUNT(*) cnt FROM t1 WHERE id=$idx, cnt, 1);
-
- if ($cnt)
- {
- let $update = `select floor(rand()*2)`;
-
- if ($update)
- {
- if ($single_or_multi)
- {
- eval UPDATE t1 SET msg=\"$msg\" WHERE id=$idx;
- }
-
- if (!$single_or_multi)
- {
- eval UPDATE t1 SET msg=\"$msg\" WHERE id >= $idx - 100 AND id <= $idx + 100;
- }
-
- }
-
- if (!$update)
- {
- if ($single_or_multi)
- {
- eval INSERT INTO t1(msg, id) VALUES (\"$msg\", $idx) ON DUPLICATE KEY UPDATE msg=VALUES(msg), id = VALUES(id);
- }
-
- if (!$single_or_multi)
- {
- let $diff = 200;
-
- while ($diff)
- {
- eval INSERT INTO t1(msg, id) VALUES (\"$msg\", $idx + 100 - $diff) ON DUPLICATE KEY UPDATE msg=VALUES(msg), id=VALUES(id);
-
- dec $diff;
- }
- }
- }
- }
-
- if (!$cnt)
- {
- let $null_msg = `select floor(rand()*2)`;
-
- if ($null_msg)
- {
- eval INSERT INTO t1(id,msg) VALUES ($idx, NULL);
- }
-
- if (!$null_msg)
- {
- eval INSERT INTO t1(id, msg) VALUES ($idx, \"$msg\");
- }
- }
- }
-
- if (!$insert_or_update)
- {
- if ($single_or_multi)
- {
- eval DELETE from t1 WHERE id=$idx;
- }
-
- if (!$single_or_multi)
- {
- eval DELETE from t1 WHERE id >= $idx - 100 AND id <= $idx + 100;
- }
- }
-
- dec $commit_count;
- if (!$commit_count)
- {
- let $commit_count= $commit_iterations;
- COMMIT;
- BEGIN;
- }
-
- dec $num_ops_ind;
-}
-
-COMMIT;
-
-# final cleanup
-DROP TABLE t1;
-
-eval SET GLOBAL innodb_simulate_comp_failures = $simulate_comp_failures_save;
-
---enable_query_log
diff --git a/mysql-test/suite/innodb/r/alter_large_dml.result b/mysql-test/suite/innodb/r/alter_large_dml.result
index 056e8fdd768..41f497a5591 100644
--- a/mysql-test/suite/innodb/r/alter_large_dml.result
+++ b/mysql-test/suite/innodb/r/alter_large_dml.result
@@ -14,16 +14,10 @@ SET DEBUG_SYNC = 'now SIGNAL dml_pause';
SET DEBUG_SYNC = 'now WAIT_FOR dml_restart';
ROLLBACK;
BEGIN;
-INSERT INTO t1 SELECT '','','','','','','','' FROM seq_1_to_16384;
-INSERT INTO t1 SELECT '','','','','','','','' FROM seq_1_to_16384;
-INSERT INTO t1 SELECT '','','','','','','','' FROM seq_1_to_16384;
-INSERT INTO t1 SELECT '','','','','','','','' FROM seq_1_to_16384;
-INSERT INTO t1 SELECT '','','','','','','','' FROM seq_1_to_16384;
+INSERT INTO t1 SELECT '','','','','','','','' FROM seq_1_to_81920;
ROLLBACK;
BEGIN;
-INSERT INTO t1 SELECT * FROM t1;
-INSERT INTO t1 SELECT * FROM t1;
-INSERT INTO t1 SELECT * FROM t1;
+INSERT INTO t1 SELECT '','','','','','','','' FROM seq_1_to_114688;
ROLLBACK;
SET DEBUG_SYNC = 'now SIGNAL dml_done';
connect con2, localhost,root,,test;
diff --git a/mysql-test/suite/innodb/r/alter_mdl_timeout.result b/mysql-test/suite/innodb/r/alter_mdl_timeout.result
new file mode 100644
index 00000000000..7af1362c69e
--- /dev/null
+++ b/mysql-test/suite/innodb/r/alter_mdl_timeout.result
@@ -0,0 +1,23 @@
+create table t1(f1 char(10), f2 char(10) not null, f3 int not null,
+f4 int not null, primary key(f3))engine=innodb;
+insert into t1 values('a','a', 1, 1), ('b','b', 2, 2), ('c', 'c', 3, 3), ('d', 'd', 4, 4);
+SET DEBUG_SYNC="row_merge_after_scan SIGNAL con1_start WAIT_FOR con1_insert";
+SET DEBUG_SYNC="innodb_commit_inplace_alter_table_wait SIGNAL con1_wait WAIT_FOR con1_update";
+ALTER TABLE t1 ADD UNIQUE INDEX(f1(3), f4), ADD UNIQUE INDEX(f2), ALGORITHM=INPLACE;
+connect con1,localhost,root,,,;
+SET DEBUG_SYNC="now WAIT_FOR con1_start";
+begin;
+INSERT INTO t1 VALUES('e','e',5, 5);
+SET DEBUG_SYNC="now SIGNAL con1_insert";
+SET DEBUG_SYNC="now WAIT_FOR con1_wait";
+SET DEBUG_SYNC="before_row_upd_sec_new_index_entry SIGNAL con1_update WAIT_FOR alter_rollback";
+UPDATE t1 set f4 = 10 order by f1 desc limit 2;
+connection default;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+SET DEBUG_SYNC="now SIGNAL alter_rollback";
+connection con1;
+commit;
+connection default;
+disconnect con1;
+DROP TABLE t1;
+SET DEBUG_SYNC="RESET";
diff --git a/mysql-test/suite/innodb/r/alter_table.result b/mysql-test/suite/innodb/r/alter_table.result
index 8330e25c70d..873baa905c6 100644
--- a/mysql-test/suite/innodb/r/alter_table.result
+++ b/mysql-test/suite/innodb/r/alter_table.result
@@ -70,6 +70,27 @@ ERROR HY000: Tablespace has been discarded for table `t`
ALTER TABLE t FORCE;
ERROR HY000: Tablespace has been discarded for table `t`
DROP TABLE t;
+#
+# MDEV-24763 ALTER TABLE fails to rename a column in SYS_FIELDS
+#
+CREATE TABLE t1 (a INT, b TEXT, c INT, PRIMARY KEY(b(9)), INDEX(c,a))
+ENGINE=InnoDB;
+ALTER TABLE t1 CHANGE COLUMN a u INT;
+SELECT sf.* FROM information_schema.innodb_sys_fields sf
+INNER JOIN information_schema.innodb_sys_indexes si ON sf.index_id=si.index_id
+INNER JOIN information_schema.innodb_sys_tables st ON si.table_id=st.table_id
+WHERE st.name='test/t1' ORDER BY sf.index_id,sf.pos;
+INDEX_ID NAME POS
+ID b 0
+ID c 0
+ID u 1
+DROP TABLE t1;
+#
+# End of 10.2 tests
+#
+#
+# Check that innodb supports transactional=1
+#
create table t1 (a int) transactional=1 engine=aria;
create table t2 (a int) transactional=1 engine=innodb;
show create table t1;
@@ -85,3 +106,6 @@ t2 CREATE TABLE `t2` (
alter table t1 engine=innodb;
alter table t1 add column b int;
drop table t1,t2;
+#
+# End of 10.4 tests
+#
diff --git a/mysql-test/suite/innodb/r/alter_varchar_change.result b/mysql-test/suite/innodb/r/alter_varchar_change.result
index ddf0449a040..9bf0d126588 100644
--- a/mysql-test/suite/innodb/r/alter_varchar_change.result
+++ b/mysql-test/suite/innodb/r/alter_varchar_change.result
@@ -492,11 +492,18 @@ DROP TABLE t1;
DROP PROCEDURE get_index_id;
DROP PROCEDURE get_table_id;
create table t (a varchar(100)) engine=innodb;
-select name, pos, mtype, prtype, len from information_schema.innodb_sys_columns where name='a';
+select sc.name, sc.pos, sc.mtype, sc.prtype, sc.len
+from information_schema.innodb_sys_columns sc
+inner join information_schema.innodb_sys_tables st
+on sc.table_id=st.table_id where st.name='test/t' and sc.name='a';
name pos mtype prtype len
a 0 1 524303 100
alter table t modify a varchar(110), algorithm=inplace;
-select name, pos, mtype, prtype, len from information_schema.innodb_sys_columns where name='a';
+select sc.name, sc.pos, sc.mtype, sc.prtype, sc.len
+from information_schema.innodb_sys_columns sc
+inner join information_schema.innodb_sys_tables st
+on sc.table_id=st.table_id where st.name='test/t' and sc.name='a';
name pos mtype prtype len
a 0 1 524303 110
drop table t;
+# End of 10.2 tests
diff --git a/mysql-test/suite/innodb/r/default_row_format_alter.result b/mysql-test/suite/innodb/r/default_row_format_alter.result
index 363954c1a6c..fa5adb32fb0 100644
--- a/mysql-test/suite/innodb/r/default_row_format_alter.result
+++ b/mysql-test/suite/innodb/r/default_row_format_alter.result
@@ -83,6 +83,16 @@ Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length I
t1 InnoDB # Compact # # # # # # NULL # # NULL latin1_swedish_ci NULL 0 N
DROP TABLE t1;
#
+# MDEV-24758 heap-use-after-poison in innobase_add_instant_try/rec_copy
+#
+CREATE TABLE t1 (pk INT PRIMARY KEY) CHARACTER SET utf8 ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1);
+SET GLOBAL innodb_default_row_format = REDUNDANT;
+ALTER TABLE t1 ADD a CHAR(8) DEFAULT '';
+DROP TABLE t1;
+SET GLOBAL innodb_default_row_format = @row_format;
+# End of 10.3 tests
+#
# MDEV-23295 Assertion fields[i].same(instant.fields[i]) failed
#
SET GLOBAL innodb_default_row_format = @row_format;
@@ -104,4 +114,5 @@ SELECT ROW_FORMAT FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME='t1';
ROW_FORMAT
Dynamic
DROP TABLE t1;
+# End of 10.4 tests
SET GLOBAL innodb_default_row_format = @row_format;
diff --git a/mysql-test/suite/innodb/r/file_format_defaults.result b/mysql-test/suite/innodb/r/file_format_defaults.result
index 4fd280450aa..ab4d72258a5 100644
--- a/mysql-test/suite/innodb/r/file_format_defaults.result
+++ b/mysql-test/suite/innodb/r/file_format_defaults.result
@@ -8,7 +8,7 @@ SELECT @@innodb_file_per_table;
SET SQL_MODE=strict_all_tables;
CREATE TABLE tab0 (c1 VARCHAR(65530), KEY(c1(3073))) ENGINE=InnoDB ROW_FORMAT=COMPRESSED;
Warnings:
-Warning 1071 Specified key was too long; max key length is 3072 bytes
+Note 1071 Specified key was too long; max key length is 3072 bytes
SHOW CREATE TABLE tab0;
Table Create Table
tab0 CREATE TABLE `tab0` (
diff --git a/mysql-test/suite/innodb/r/foreign_key.result b/mysql-test/suite/innodb/r/foreign_key.result
index 34d0c3dad64..c7332f3dfc6 100644
--- a/mysql-test/suite/innodb/r/foreign_key.result
+++ b/mysql-test/suite/innodb/r/foreign_key.result
@@ -801,6 +801,17 @@ ERROR 23000: Duplicate entry '10' for key 'ind9'
SET FOREIGN_KEY_CHECKS= 0;
ALTER TABLE t1 ADD FOREIGN KEY (a) REFERENCES t1 (pk);
DROP TABLE t1;
+SET FOREIGN_KEY_CHECKS= 1;
+#
+# MDEV-23455 Hangs + Sig11 in unknown location(s) due to single complex FK query
+#
+Parsing foreign keys 1...
+ERROR HY000: Can't create table `test`.`t0` (errno: 150 "Foreign key constraint is incorrectly formed")
+Parsing foreign keys 2...
+ERROR HY000: Can't create table `test`.`t1` (errno: 150 "Foreign key constraint is incorrectly formed")
+Parsing foreign keys 3...
+ERROR HY000: Can't create table `test`.`t1` (errno: 150 "Foreign key constraint is incorrectly formed")
+Parsing foreign keys 4...
# End of 10.2 tests
CREATE TABLE t1 (a GEOMETRY, INDEX(a(8)),
FOREIGN KEY (a) REFERENCES x (xx)) ENGINE=InnoDB;
diff --git a/mysql-test/suite/innodb/r/innodb-autoinc.result b/mysql-test/suite/innodb/r/innodb-autoinc.result
index 273da66073e..1efd5c61af7 100644
--- a/mysql-test/suite/innodb/r/innodb-autoinc.result
+++ b/mysql-test/suite/innodb/r/innodb-autoinc.result
@@ -789,7 +789,7 @@ t2 CREATE TABLE `t2` (
`n` int(10) unsigned NOT NULL,
`o` enum('FALSE','TRUE') DEFAULT NULL,
PRIMARY KEY (`m`)
-) ENGINE=InnoDB AUTO_INCREMENT=19 DEFAULT CHARSET=latin1
+) ENGINE=InnoDB AUTO_INCREMENT=11 DEFAULT CHARSET=latin1
INSERT INTO t1 (b,c) SELECT n,o FROM t2 ;
SHOW CREATE TABLE t1;
Table Create Table
diff --git a/mysql-test/suite/innodb/r/innodb-virtual-columns2.result b/mysql-test/suite/innodb/r/innodb-virtual-columns2.result
index 3574ba72849..99a1c610bd3 100644
--- a/mysql-test/suite/innodb/r/innodb-virtual-columns2.result
+++ b/mysql-test/suite/innodb/r/innodb-virtual-columns2.result
@@ -62,3 +62,28 @@ INSERT INTO t1 (i) VALUES (1),(2);
SELECT * FROM t1 WHERE y BETWEEN 2012 AND 2016 FOR UPDATE;
y i b vi
DROP TABLE t1;
+#
+# MDEV-23632 ALTER TABLE...ADD KEY creates corrupted index on virtual column
+#
+CREATE TABLE t1(a INT PRIMARY KEY, b INT, g INT GENERATED ALWAYS AS(b)VIRTUAL) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1,1,default);
+ALTER TABLE t1 ADD COLUMN c INT;
+ALTER TABLE t1 ADD KEY(g);
+CHECK TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 check status OK
+SELECT g FROM t1 FORCE INDEX (g);
+g
+1
+DROP TABLE t1;
+CREATE TABLE t1(a INT, b INT, g INT GENERATED ALWAYS AS(b)VIRTUAL) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1,1,default);
+ALTER TABLE t1 ADD COLUMN c INT PRIMARY KEY;
+ALTER TABLE t1 ADD KEY(g);
+CHECK TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 check status OK
+SELECT g FROM t1 FORCE INDEX (g);
+g
+1
+DROP TABLE t1;
diff --git a/mysql-test/suite/innodb/r/innodb.result b/mysql-test/suite/innodb/r/innodb.result
index b48a85a2443..bcdd799f9ff 100644
--- a/mysql-test/suite/innodb/r/innodb.result
+++ b/mysql-test/suite/innodb/r/innodb.result
@@ -2300,7 +2300,7 @@ drop table t1;
SET sql_mode = 'NO_ENGINE_SUBSTITUTION';
create table t1 (v varchar(65530), key(v));
Warnings:
-Warning 1071 Specified key was too long; max key length is 3072 bytes
+Note 1071 Specified key was too long; max key length is 3072 bytes
drop table t1;
create table t1 (v varchar(65536));
Warnings:
@@ -3319,3 +3319,20 @@ c1 c2
9 3
DROP TABLE t1;
DROP TABLE t2;
+#
+# MDEV-24748 Extern field check missing
+# in btr_index_rec_validate()
+#
+CREATE TABLE t1 (pk INT, c1 char(255),
+c2 char(255), c3 char(255), c4 char(255),
+c5 char(255), c6 char(255), c7 char(255),
+c8 char(255), primary key (pk)
+) CHARACTER SET utf32 ENGINE=InnoDB;
+INSERT INTO t1 VALUES
+(1, 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'),
+(2, 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p');
+CHECK TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 check status OK
+ALTER TABLE t1 FORCE;
+DROP TABLE t1;
diff --git a/mysql-test/suite/innodb/r/innodb_buffer_pool_fail.result b/mysql-test/suite/innodb/r/innodb_buffer_pool_fail.result
new file mode 100644
index 00000000000..9879ef206f2
--- /dev/null
+++ b/mysql-test/suite/innodb/r/innodb_buffer_pool_fail.result
@@ -0,0 +1,8 @@
+call mtr.add_suppression("InnoDB: Cannot allocate memory for the buffer pool");
+call mtr.add_suppression("InnoDB: Plugin initialization aborted at srv0start.cc.*");
+call mtr.add_suppression("Plugin 'InnoDB' init function returned error.");
+call mtr.add_suppression("Plugin 'InnoDB' registration as a STORAGE ENGINE failed.");
+#
+# MDEV-25019 memory allocation failures during startup cause server failure in different, confusing ways
+#
+# restart: --debug_dbug=+d,ib_buf_chunk_init_fails
diff --git a/mysql-test/suite/innodb/r/innodb_multi_update.result b/mysql-test/suite/innodb/r/innodb_multi_update.result
index 64f9ebc2fc2..93bd4e6716c 100644
--- a/mysql-test/suite/innodb/r/innodb_multi_update.result
+++ b/mysql-test/suite/innodb/r/innodb_multi_update.result
@@ -81,4 +81,5 @@ CREATE TABLE t1(f1 INT) ENGINE=INNODB;
INSERT INTO t1 VALUES(1);
UPDATE (SELECT ((SELECT 1 FROM t1), 1) FROM t1 WHERE (SELECT 1 FROM t1)) x, (SELECT 1) AS d SET d.f1 = 1;
ERROR 21000: Operand should contain 1 column(s)
+UPDATE (SELECT ((SELECT 1 FROM t1),1) = (1,1) FROM t1 WHERE (SELECT 1 FROM t1)) x, t1 AS d SET d.f1 = 1;
DROP TABLE t1;
diff --git a/mysql-test/suite/innodb/r/innodb_simulate_comp_failures.result b/mysql-test/suite/innodb/r/innodb_simulate_comp_failures.result
deleted file mode 100644
index f35e4159603..00000000000
--- a/mysql-test/suite/innodb/r/innodb_simulate_comp_failures.result
+++ /dev/null
@@ -1,17 +0,0 @@
-#
-# Testing robustness against random compression failures
-#
-CREATE TABLE t1(id INT AUTO_INCREMENT PRIMARY KEY, msg VARCHAR(255), KEY msg_i(msg)) ENGINE=INNODB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8;
-SHOW CREATE TABLE t1;
-Table Create Table
-t1 CREATE TABLE `t1` (
- `id` int(11) NOT NULL AUTO_INCREMENT,
- `msg` varchar(255) DEFAULT NULL,
- PRIMARY KEY (`id`),
- KEY `msg_i` (`msg`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1 ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8
-SET GLOBAL innodb_simulate_comp_failures = 25;
-COMMIT;
-SELECT COUNT(id) FROM t1;
-COUNT(id)
-1500
diff --git a/mysql-test/suite/innodb/r/innodb_simulate_comp_failures_small.result b/mysql-test/suite/innodb/r/innodb_simulate_comp_failures_small.result
deleted file mode 100644
index 099c673bca7..00000000000
--- a/mysql-test/suite/innodb/r/innodb_simulate_comp_failures_small.result
+++ /dev/null
@@ -1,17 +0,0 @@
-#
-# Testing robustness against random compression failures
-#
-CREATE TABLE t1(id INT AUTO_INCREMENT PRIMARY KEY, msg VARCHAR(255), KEY msg_i(msg)) ENGINE=INNODB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8;
-SHOW CREATE TABLE t1;
-Table Create Table
-t1 CREATE TABLE `t1` (
- `id` int(11) NOT NULL AUTO_INCREMENT,
- `msg` varchar(255) DEFAULT NULL,
- PRIMARY KEY (`id`),
- KEY `msg_i` (`msg`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1 ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8
-SET GLOBAL innodb_simulate_comp_failures = 25;
-COMMIT;
-SELECT COUNT(id) FROM t1;
-COUNT(id)
-1000
diff --git a/mysql-test/suite/innodb/r/instant_alter.result b/mysql-test/suite/innodb/r/instant_alter.result
index e0661557159..110939b5fdb 100644
--- a/mysql-test/suite/innodb/r/instant_alter.result
+++ b/mysql-test/suite/innodb/r/instant_alter.result
@@ -2841,3 +2841,43 @@ t1 CREATE TABLE `t1` (
KEY `i1` (`a`) COMMENT 'comment2'
) ENGINE=InnoDB DEFAULT CHARSET=latin1
DROP TABLE t1;
+#
+# MDEV-25057 Assertion `n_fields < dtuple_get_n_fields(entry)'
+# failed in dtuple_convert_big_rec
+#
+CREATE TABLE t1 (pk INT PRIMARY KEY AUTO_INCREMENT,
+a CHAR(255) NOT NULL,
+b CHAR(255) NOT NULL, c INT) ENGINE=InnoDB CHARSET utf32;
+ALTER TABLE t1 DROP c;
+INSERT INTO t1(a, b) SELECT '', '' FROM seq_1_to_16;
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+16
+DROP TABLE t1;
+#
+# MDEV-25630 Rollback of instant operation adds wrong
+# column to secondary index
+#
+CREATE TABLE t1 (f1 INT, f2 INT, f3 INT, f4 INT,
+PRIMARY KEY(f1, f4),
+KEY(f2))ENGINE=InnoDB;
+CREATE TABLE t2 (f1 INT, f2 INT, PRIMARY KEY(f1),
+FOREIGN KEY fk (f2) REFERENCES t2(f1)
+)ENGINE=InnoDB;
+ALTER TABLE t1 ADD f5 INT;
+SET FOREIGN_KEY_CHECKS=0;
+ALTER TABLE t1 DROP COLUMN f3, ADD FOREIGN KEY fk (f1)
+REFERENCES x(x);
+ERROR HY000: Failed to add the foreign key constraint 'test/fk' to system tables
+ALTER TABLE t1 DROP COLUMN f5;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `f1` int(11) NOT NULL,
+ `f2` int(11) DEFAULT NULL,
+ `f3` int(11) DEFAULT NULL,
+ `f4` int(11) NOT NULL,
+ PRIMARY KEY (`f1`,`f4`),
+ KEY `f2` (`f2`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+DROP TABLE t1, t2;
diff --git a/mysql-test/suite/innodb/r/instant_alter_charset.result b/mysql-test/suite/innodb/r/instant_alter_charset.result
index 6b60c79b558..8b1171191fa 100644
--- a/mysql-test/suite/innodb/r/instant_alter_charset.result
+++ b/mysql-test/suite/innodb/r/instant_alter_charset.result
@@ -2032,3 +2032,14 @@ ALTER TABLE t1 MODIFY a VARCHAR(2)
CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;
INSERT INTO t1 VALUES ('a');
DROP TABLE t1;
+#
+# MDEV-22775 [HY000][1553] Changing name of primary key column with foreign key constraint fails.
+#
+create table t1 (id int primary key) engine=innodb default charset=utf8;
+create table t2 (input_id int primary key, id int not null,
+key a (id),
+constraint a foreign key (id) references t1 (id)
+)engine=innodb default charset=utf8;
+alter table t1 change id id2 int;
+drop table t2;
+drop table t1;
diff --git a/mysql-test/suite/innodb/r/instant_alter_debug.result b/mysql-test/suite/innodb/r/instant_alter_debug.result
index 9fcb8b05a34..9a681b77c76 100644
--- a/mysql-test/suite/innodb/r/instant_alter_debug.result
+++ b/mysql-test/suite/innodb/r/instant_alter_debug.result
@@ -193,6 +193,12 @@ SET DEBUG_SYNC='RESET';
CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB;
INSERT INTO t1 SET a=0;
ALTER TABLE t1 ADD COLUMN b INT NOT NULL DEFAULT 2, ADD COLUMN c INT;
+BEGIN NOT ATOMIC
+DECLARE c TEXT DEFAULT(SELECT CONCAT('ALTER TABLE t1 ADD (c',
+GROUP_CONCAT(seq SEPARATOR ' INT, c'), ' INT), ALGORITHM=INSTANT;') FROM seq_1_to_130);
+EXECUTE IMMEDIATE c;
+END;
+$$
connection stop_purge;
START TRANSACTION WITH CONSISTENT SNAPSHOT;
connection default;
@@ -211,7 +217,7 @@ SET DEBUG_SYNC = 'now SIGNAL logged';
connection ddl;
connection default;
SET DEBUG_SYNC = RESET;
-SELECT * FROM t1;
+SELECT a, b, c FROM t1;
a b c
1 2 NULL
2 3 4
@@ -234,7 +240,7 @@ connection ddl;
UPDATE t1 SET b = b + 1 WHERE a = 2;
connection default;
SET DEBUG_SYNC = RESET;
-SELECT * FROM t1;
+SELECT a, b, c FROM t1;
a b c
1 2 NULL
2 3 4
@@ -258,7 +264,7 @@ ERROR 22004: Invalid use of NULL value
disconnect ddl;
connection default;
SET DEBUG_SYNC = RESET;
-SELECT * FROM t1;
+SELECT a, b, c, d FROM t1;
a b c d
1 2 NULL 1
2 3 4 1
@@ -291,44 +297,6 @@ a b vb
5 NULL NULL
DROP TABLE t1;
#
-# MDEV-17899 Assertion failures on rollback of instant ADD/DROP
-# MDEV-18098 Crash after rollback of instant DROP COLUMN
-#
-SET @save_dbug = @@SESSION.debug_dbug;
-SET debug_dbug='+d,ib_commit_inplace_fail_1';
-CREATE TABLE t1 (a int, b int) ENGINE=InnoDB;
-INSERT INTO t1 VALUES (1,2);
-ALTER TABLE t1 DROP COLUMN b;
-ERROR HY000: Internal error: Injected error!
-ALTER TABLE t1 DROP COLUMN b;
-ERROR HY000: Internal error: Injected error!
-ALTER TABLE t1 ADD COLUMN c INT;
-ERROR HY000: Internal error: Injected error!
-SELECT * FROM t1;
-a b
-1 2
-DROP TABLE t1;
-CREATE TABLE t1 (a int, b int) ENGINE=InnoDB;
-ALTER TABLE t1 ADD COLUMN c INT;
-ERROR HY000: Internal error: Injected error!
-BEGIN;
-INSERT INTO t1 VALUES(1, 1);
-ROLLBACK;
-ALTER TABLE t1 DROP COLUMN b;
-ERROR HY000: Internal error: Injected error!
-INSERT INTO t1 values (1,1);
-SELECT * FROM t1;
-a b
-1 1
-DROP TABLE t1;
-SET debug_dbug = @save_dbug;
-SELECT variable_value-@old_instant instants
-FROM information_schema.global_status
-WHERE variable_name = 'innodb_instant_alter_column';
-instants
-22
-SET GLOBAL innodb_purge_rseg_truncate_frequency = @save_frequency;
-#
# MDEV-21045 AddressSanitizer: use-after-poison in mem_heap_dup / row_log_table_get_pk_col
#
CREATE TABLE t1 (a TEXT) ENGINE = InnoDB ROW_FORMAT=REDUNDANT;
@@ -370,3 +338,115 @@ b
SET DEBUG_SYNC='RESET';
disconnect con2;
DROP TABLE t1;
+#
+# MDEV-24653 Assertion block->page.id.page_no() == index->page failed
+# in innobase_add_instant_try()
+#
+SET @saved_limit = @@GLOBAL.innodb_limit_optimistic_insert_debug;
+SET GLOBAL innodb_limit_optimistic_insert_debug = 2;
+CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1),(2),(3),(4);
+ALTER TABLE t1 ADD COLUMN b INT;
+DELETE FROM t1;
+InnoDB 0 transactions not purged
+ALTER TABLE t1 ADD COLUMN c INT;
+SELECT * FROM t1;
+a b c
+DROP TABLE t1;
+SET GLOBAL innodb_limit_optimistic_insert_debug = @saved_limit;
+#
+# MDEV-24796 Assertion page_has_next... failed
+# in btr_pcur_store_position()
+#
+CREATE TABLE t1 (c INT KEY) ENGINE=InnoDB;
+INSERT INTO t1 VALUES(1),(2);
+SET GLOBAL innodb_limit_optimistic_insert_debug=2;
+ALTER TABLE t1 ADD COLUMN d INT;
+DELETE FROM t1;
+InnoDB 0 transactions not purged
+SELECT * FROM t1 WHERE c<>1 ORDER BY c DESC;
+c d
+DROP TABLE t1;
+SET GLOBAL innodb_limit_optimistic_insert_debug = @saved_limit;
+#
+# MDEV-24620 ASAN heap-buffer-overflow in btr_pcur_restore_position()
+#
+CREATE TABLE t1 (a VARCHAR(1) PRIMARY KEY) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1);
+connect stop_purge,localhost,root,,;
+START TRANSACTION WITH CONSISTENT SNAPSHOT;
+connection default;
+ALTER TABLE t1 ADD c INT;
+BEGIN;
+DELETE FROM t1;
+connect dml,localhost,root,,test;
+SET DEBUG_SYNC='row_mysql_handle_errors SIGNAL s1 WAIT_FOR s2';
+UPDATE t1 SET c=1;
+connection default;
+SET DEBUG_SYNC='now WAIT_FOR s1';
+COMMIT;
+connection stop_purge;
+COMMIT;
+disconnect stop_purge;
+connection default;
+InnoDB 0 transactions not purged
+SET DEBUG_SYNC='now SIGNAL s2';
+connection dml;
+disconnect dml;
+connection default;
+SET DEBUG_SYNC=RESET;
+DROP TABLE t1;
+# End of 10.3 tests
+#
+# MDEV-17899 Assertion failures on rollback of instant ADD/DROP
+# MDEV-18098 Crash after rollback of instant DROP COLUMN
+#
+SET @save_dbug = @@SESSION.debug_dbug;
+SET debug_dbug='+d,ib_commit_inplace_fail_1';
+CREATE TABLE t1 (a int, b int) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1,2);
+ALTER TABLE t1 DROP COLUMN b;
+ERROR HY000: Internal error: Injected error!
+ALTER TABLE t1 DROP COLUMN b;
+ERROR HY000: Internal error: Injected error!
+ALTER TABLE t1 ADD COLUMN c INT;
+ERROR HY000: Internal error: Injected error!
+SELECT * FROM t1;
+a b
+1 2
+DROP TABLE t1;
+CREATE TABLE t1 (a int, b int) ENGINE=InnoDB;
+ALTER TABLE t1 ADD COLUMN c INT;
+ERROR HY000: Internal error: Injected error!
+BEGIN;
+INSERT INTO t1 VALUES(1, 1);
+ROLLBACK;
+ALTER TABLE t1 DROP COLUMN b;
+ERROR HY000: Internal error: Injected error!
+INSERT INTO t1 values (1,1);
+SELECT * FROM t1;
+a b
+1 1
+DROP TABLE t1;
+SET debug_dbug = @save_dbug;
+#
+# MDEV-24512 Assertion failed in rec_is_metadata()
+# in btr_discard_only_page_on_level()
+#
+SET @save_limit= @@GLOBAL.innodb_limit_optimistic_insert_debug;
+SET GLOBAL innodb_limit_optimistic_insert_debug=2;
+CREATE TABLE t1 (c CHAR(1) UNIQUE) ENGINE=InnoDB;
+ALTER TABLE t1 ADD c2 INT NOT NULL DEFAULT 0 FIRST;
+INSERT INTO t1 (c) VALUES ('x'),('d'),('r'),('f'),('y'),('u'),('m'),('d');
+ERROR 23000: Duplicate entry 'd' for key 'c'
+SET GLOBAL innodb_limit_optimistic_insert_debug=@save_limit;
+SELECT * FROM t1;
+c2 c
+DROP TABLE t1;
+# End of 10.4 tests
+SET GLOBAL innodb_purge_rseg_truncate_frequency = @save_frequency;
+SELECT variable_value-@old_instant instants
+FROM information_schema.global_status
+WHERE variable_name = 'innodb_instant_alter_column';
+instants
+32
diff --git a/mysql-test/suite/innodb/r/mvcc_secondary.result b/mysql-test/suite/innodb/r/mvcc_secondary.result
new file mode 100644
index 00000000000..2289742e830
--- /dev/null
+++ b/mysql-test/suite/innodb/r/mvcc_secondary.result
@@ -0,0 +1,24 @@
+#
+# MDEV-25459 MVCC read from index on CHAR or VARCHAR wrongly omits rows
+#
+CREATE TABLE t1 (
+pk int PRIMARY KEY, c varchar(255) UNIQUE,
+d char(255), e varchar(255), f char(255), g char(255)
+) ENGINE=InnoDB ROW_FORMAT=DYNAMIC DEFAULT CHARACTER SET ucs2;
+INSERT INTO t1 VALUES
+(1,REPEAT('c',248),REPEAT('a',106),REPEAT('b',220),REPEAT('x',14),'');
+BEGIN;
+UPDATE t1 SET c=REPEAT('d',170);
+connect con1,localhost,root,,;
+SELECT pk FROM t1 FORCE INDEX (c);
+pk
+1
+connection default;
+COMMIT;
+connection con1;
+SELECT pk FROM t1 FORCE INDEX (c);
+pk
+1
+disconnect con1;
+connection default;
+DROP TABLE t1;
diff --git a/mysql-test/suite/innodb/r/temporary_table.result b/mysql-test/suite/innodb/r/temporary_table.result
index d7ea25fa117..0df99aecc46 100644
--- a/mysql-test/suite/innodb/r/temporary_table.result
+++ b/mysql-test/suite/innodb/r/temporary_table.result
@@ -678,3 +678,99 @@ SET FOREIGN_KEY_CHECKS = 0;
CREATE TEMPORARY TABLE t1(f1 INT NOT NULL,
FOREIGN KEY(f1) REFERENCES t0(f1))ENGINE=InnoDB;
ERROR HY000: Can't create table `test`.`t1` (errno: 150 "Foreign key constraint is incorrectly formed")
+CREATE TABLE t (c INT) ENGINE=InnoDB;
+INSERT INTO t VALUES(0);
+CREATE TEMPORARY TABLE t2 (c INT) ENGINE=InnoDB;
+START TRANSACTION READ ONLY;
+INSERT INTO t2 SELECT * FROM t;
+COMMIT;
+DROP TABLE t, t2;
+CREATE TEMPORARY TABLE t (c INT,c2 INT) ENGINE=InnoDB;
+START TRANSACTION READ ONLY;
+INSERT INTO t VALUES(0);
+ERROR 21S01: Column count doesn't match value count at row 1
+SAVEPOINT s;
+INSERT INTO t VALUES(0,0);
+COMMIT;
+DROP TABLE t;
+CREATE TEMPORARY TABLE t (c INT,c2 INT) ENGINE=InnoDB;
+START TRANSACTION READ ONLY;
+INSERT INTO t VALUES(0);
+ERROR 21S01: Column count doesn't match value count at row 1
+SAVEPOINT s;
+INSERT INTO t VALUES(0,0);
+ROLLBACK;
+DROP TABLE t;
+CREATE TEMPORARY TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1);
+START TRANSACTION READ ONLY;
+UPDATE t1 SET a= 2;
+COMMIT;
+DROP TABLE t1;
+CREATE TEMPORARY TABLE t(c INT) ENGINE=InnoDB;
+SET SESSION tx_read_only=TRUE;
+LOCK TABLE test.t READ;
+SELECT * FROM t;
+c
+INSERT INTO t VALUES(0xADC3);
+SET SESSION tx_read_only=FALSE;
+DROP TABLE t;
+CREATE TEMPORARY TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1);
+START TRANSACTION READ ONLY;
+UPDATE t1 SET a= 2;
+COMMIT;
+DROP TABLE t1;
+CREATE TEMPORARY TABLE t1 (a INT PRIMARY KEY, b int) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1, 1);
+START TRANSACTION READ ONLY;
+UPDATE t1 SET b= 2;
+COMMIT;
+DROP TABLE t1;
+CREATE TEMPORARY TABLE t1 (a INT PRIMARY KEY, b int, c varchar(255)) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1, 1, repeat('a', 200));
+START TRANSACTION READ ONLY;
+UPDATE t1 SET b= 2, c=repeat('a', 250);
+COMMIT;
+DROP TABLE t1;
+CREATE TEMPORARY TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1);
+START TRANSACTION READ ONLY;
+UPDATE t1 SET a= 2;
+ROLLBACK;
+DROP TABLE t1;
+CREATE TEMPORARY TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1);
+START TRANSACTION READ ONLY;
+DELETE FROM t1 WHERE a= 2;
+COMMIT;
+DROP TABLE t1;
+CREATE TEMPORARY TABLE tmp (a INT) ENGINE=InnoDB;
+INSERT INTO tmp () VALUES (),();
+SET TX_READ_ONLY= 1;
+INSERT INTO tmp SELECT * FROM tmp;
+SET TX_READ_ONLY= 0;
+DROP TABLE tmp;
+SET sql_mode='';
+SET GLOBAL tx_read_only=TRUE;
+CREATE TEMPORARY TABLE t (c INT);
+SET SESSION tx_read_only=DEFAULT;
+INSERT INTO t VALUES(1);
+INSERT INTO t SELECT * FROM t;
+SET SESSION tx_read_only=FALSE;
+SET GLOBAL tx_read_only=OFF;
+DROP TABLE t;
+CREATE TEMPORARY TABLE t(a INT);
+SET SESSION tx_read_only=ON;
+LOCK TABLE t READ;
+SELECT COUNT(*)FROM t;
+COUNT(*)
+0
+INSERT INTO t VALUES (0);
+SET SESSION tx_read_only=OFF;
+DROP TABLE t;
+CREATE TEMPORARY TABLE t (a INT) ENGINE=InnoDB;
+INSERT INTO t VALUES (1);
+START TRANSACTION READ ONLY;
+UPDATE t SET a = NULL;
+ROLLBACK;
diff --git a/mysql-test/suite/innodb/r/truncate_foreign.result b/mysql-test/suite/innodb/r/truncate_foreign.result
index fc09b74d62f..12a41860708 100644
--- a/mysql-test/suite/innodb/r/truncate_foreign.result
+++ b/mysql-test/suite/innodb/r/truncate_foreign.result
@@ -57,3 +57,14 @@ disconnect dml;
connection default;
SET DEBUG_SYNC = RESET;
DROP TABLE child, parent;
+#
+# MDEV-24532 Table corruption ER_NO_SUCH_TABLE_IN_ENGINE or
+# ER_CRASHED_ON_USAGE after ALTER on table with foreign key
+#
+CREATE TABLE t1 (a INT, b INT, PRIMARY KEY (a)) ENGINE=InnoDB;
+ALTER TABLE t1 ADD FOREIGN KEY (b) REFERENCES t1 (a) ON UPDATE CASCADE;
+LOCK TABLE t1 WRITE;
+TRUNCATE TABLE t1;
+ALTER TABLE t1 ADD c INT;
+UNLOCK TABLES;
+DROP TABLE t1;
diff --git a/mysql-test/suite/innodb/t/alter_large_dml.test b/mysql-test/suite/innodb/t/alter_large_dml.test
index 5ab3f394115..54f5f171f05 100644
--- a/mysql-test/suite/innodb/t/alter_large_dml.test
+++ b/mysql-test/suite/innodb/t/alter_large_dml.test
@@ -25,17 +25,11 @@ SET DEBUG_SYNC = 'now WAIT_FOR dml_restart';
ROLLBACK;
BEGIN;
-INSERT INTO t1 SELECT '','','','','','','','' FROM seq_1_to_16384;
-INSERT INTO t1 SELECT '','','','','','','','' FROM seq_1_to_16384;
-INSERT INTO t1 SELECT '','','','','','','','' FROM seq_1_to_16384;
-INSERT INTO t1 SELECT '','','','','','','','' FROM seq_1_to_16384;
-INSERT INTO t1 SELECT '','','','','','','','' FROM seq_1_to_16384;
+INSERT INTO t1 SELECT '','','','','','','','' FROM seq_1_to_81920;
ROLLBACK;
BEGIN;
-INSERT INTO t1 SELECT * FROM t1;
-INSERT INTO t1 SELECT * FROM t1;
-INSERT INTO t1 SELECT * FROM t1;
+INSERT INTO t1 SELECT '','','','','','','','' FROM seq_1_to_114688;
ROLLBACK;
SET DEBUG_SYNC = 'now SIGNAL dml_done';
diff --git a/mysql-test/suite/innodb/t/alter_mdl_timeout.opt b/mysql-test/suite/innodb/t/alter_mdl_timeout.opt
new file mode 100644
index 00000000000..9e0e38bd64a
--- /dev/null
+++ b/mysql-test/suite/innodb/t/alter_mdl_timeout.opt
@@ -0,0 +1 @@
+--lock_wait_timeout=2
diff --git a/mysql-test/suite/innodb/t/alter_mdl_timeout.test b/mysql-test/suite/innodb/t/alter_mdl_timeout.test
new file mode 100644
index 00000000000..15e7f524fd0
--- /dev/null
+++ b/mysql-test/suite/innodb/t/alter_mdl_timeout.test
@@ -0,0 +1,32 @@
+--source include/have_innodb.inc
+--source include/have_debug.inc
+
+create table t1(f1 char(10), f2 char(10) not null, f3 int not null,
+ f4 int not null, primary key(f3))engine=innodb;
+insert into t1 values('a','a', 1, 1), ('b','b', 2, 2), ('c', 'c', 3, 3), ('d', 'd', 4, 4);
+SET DEBUG_SYNC="row_merge_after_scan SIGNAL con1_start WAIT_FOR con1_insert";
+SET DEBUG_SYNC="innodb_commit_inplace_alter_table_wait SIGNAL con1_wait WAIT_FOR con1_update";
+send ALTER TABLE t1 ADD UNIQUE INDEX(f1(3), f4), ADD UNIQUE INDEX(f2), ALGORITHM=INPLACE;
+
+connect(con1,localhost,root,,,);
+SET DEBUG_SYNC="now WAIT_FOR con1_start";
+begin;
+INSERT INTO t1 VALUES('e','e',5, 5);
+SET DEBUG_SYNC="now SIGNAL con1_insert";
+SET DEBUG_SYNC="now WAIT_FOR con1_wait";
+SET DEBUG_SYNC="before_row_upd_sec_new_index_entry SIGNAL con1_update WAIT_FOR alter_rollback";
+send UPDATE t1 set f4 = 10 order by f1 desc limit 2;
+
+connection default;
+--error ER_LOCK_WAIT_TIMEOUT
+reap;
+SET DEBUG_SYNC="now SIGNAL alter_rollback";
+
+connection con1;
+reap;
+commit;
+
+connection default;
+disconnect con1;
+DROP TABLE t1;
+SET DEBUG_SYNC="RESET";
diff --git a/mysql-test/suite/innodb/t/alter_table.test b/mysql-test/suite/innodb/t/alter_table.test
index aca70e61bc6..2b84a37cdce 100644
--- a/mysql-test/suite/innodb/t/alter_table.test
+++ b/mysql-test/suite/innodb/t/alter_table.test
@@ -80,9 +80,26 @@ ALTER TABLE t ENGINE INNODB;
ALTER TABLE t FORCE;
DROP TABLE t;
-#
-# Check that innodb supports transactional=1
-#
+--echo #
+--echo # MDEV-24763 ALTER TABLE fails to rename a column in SYS_FIELDS
+--echo #
+CREATE TABLE t1 (a INT, b TEXT, c INT, PRIMARY KEY(b(9)), INDEX(c,a))
+ENGINE=InnoDB;
+ALTER TABLE t1 CHANGE COLUMN a u INT;
+--replace_column 1 ID
+SELECT sf.* FROM information_schema.innodb_sys_fields sf
+INNER JOIN information_schema.innodb_sys_indexes si ON sf.index_id=si.index_id
+INNER JOIN information_schema.innodb_sys_tables st ON si.table_id=st.table_id
+WHERE st.name='test/t1' ORDER BY sf.index_id,sf.pos;
+DROP TABLE t1;
+
+--echo #
+--echo # End of 10.2 tests
+--echo #
+
+--echo #
+--echo # Check that innodb supports transactional=1
+--echo #
create table t1 (a int) transactional=1 engine=aria;
create table t2 (a int) transactional=1 engine=innodb;
@@ -91,3 +108,8 @@ show create table t2;
alter table t1 engine=innodb;
alter table t1 add column b int;
drop table t1,t2;
+
+--echo #
+--echo # End of 10.4 tests
+--echo #
+
diff --git a/mysql-test/suite/innodb/t/alter_varchar_change.test b/mysql-test/suite/innodb/t/alter_varchar_change.test
index 65dc38a3411..d1ce5d7403e 100644
--- a/mysql-test/suite/innodb/t/alter_varchar_change.test
+++ b/mysql-test/suite/innodb/t/alter_varchar_change.test
@@ -361,7 +361,15 @@ DROP PROCEDURE get_table_id;
# LEN must increase here
create table t (a varchar(100)) engine=innodb;
-select name, pos, mtype, prtype, len from information_schema.innodb_sys_columns where name='a';
+select sc.name, sc.pos, sc.mtype, sc.prtype, sc.len
+from information_schema.innodb_sys_columns sc
+inner join information_schema.innodb_sys_tables st
+on sc.table_id=st.table_id where st.name='test/t' and sc.name='a';
alter table t modify a varchar(110), algorithm=inplace;
-select name, pos, mtype, prtype, len from information_schema.innodb_sys_columns where name='a';
+select sc.name, sc.pos, sc.mtype, sc.prtype, sc.len
+from information_schema.innodb_sys_columns sc
+inner join information_schema.innodb_sys_tables st
+on sc.table_id=st.table_id where st.name='test/t' and sc.name='a';
drop table t;
+
+--echo # End of 10.2 tests
diff --git a/mysql-test/suite/innodb/t/default_row_format_alter.test b/mysql-test/suite/innodb/t/default_row_format_alter.test
index 1f0b0d56bc4..7cd4d672858 100644
--- a/mysql-test/suite/innodb/t/default_row_format_alter.test
+++ b/mysql-test/suite/innodb/t/default_row_format_alter.test
@@ -97,6 +97,19 @@ SHOW TABLE STATUS LIKE 't1';
DROP TABLE t1;
--echo #
+--echo # MDEV-24758 heap-use-after-poison in innobase_add_instant_try/rec_copy
+--echo #
+
+CREATE TABLE t1 (pk INT PRIMARY KEY) CHARACTER SET utf8 ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1);
+SET GLOBAL innodb_default_row_format = REDUNDANT;
+ALTER TABLE t1 ADD a CHAR(8) DEFAULT '';
+DROP TABLE t1;
+SET GLOBAL innodb_default_row_format = @row_format;
+
+--echo # End of 10.3 tests
+
+--echo #
--echo # MDEV-23295 Assertion fields[i].same(instant.fields[i]) failed
--echo #
SET GLOBAL innodb_default_row_format = @row_format;
@@ -116,4 +129,6 @@ ALTER TABLE t1 DROP b;
SELECT ROW_FORMAT FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME='t1';
DROP TABLE t1;
+--echo # End of 10.4 tests
+
SET GLOBAL innodb_default_row_format = @row_format;
diff --git a/mysql-test/suite/innodb/t/foreign_key.test b/mysql-test/suite/innodb/t/foreign_key.test
index 41299e4bc35..a413ea43646 100644
--- a/mysql-test/suite/innodb/t/foreign_key.test
+++ b/mysql-test/suite/innodb/t/foreign_key.test
@@ -782,6 +782,45 @@ ALTER TABLE t1 ADD UNIQUE INDEX ind9 (b), LOCK=SHARED;
SET FOREIGN_KEY_CHECKS= 0;
ALTER TABLE t1 ADD FOREIGN KEY (a) REFERENCES t1 (pk);
DROP TABLE t1;
+SET FOREIGN_KEY_CHECKS= 1;
+
+--echo #
+--echo # MDEV-23455 Hangs + Sig11 in unknown location(s) due to single complex FK query
+--echo #
+let $constr_prefix= aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa;
+let $fk_ref= xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx;
+let $fk_field= yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy;
+let $constr_count= 200; # every 100 constraints adds about 1 second of test execution
+let $i= 0;
+
+while ($i < $constr_count)
+{
+ let $p= $constr_prefix$i;
+ let $constr= CONSTRAINT $p FOREIGN KEY ($fk_field) REFERENCES t1($fk_ref) ON UPDATE SET NULL;
+ if ($i)
+ {
+ let $constrs= $constrs, $constr;
+ }
+ if (!$i)
+ {
+ let $constrs= $constr;
+ }
+ inc $i;
+}
+--disable_query_log
+--echo Parsing foreign keys 1...
+--error ER_CANT_CREATE_TABLE
+eval create table t0($fk_field int, $constrs) engine innodb;
+--echo Parsing foreign keys 2...
+--error ER_CANT_CREATE_TABLE
+eval create table t1($fk_field int, $constrs) engine innodb;
+--echo Parsing foreign keys 3...
+--error ER_CANT_CREATE_TABLE
+eval create table t1($fk_ref int, $fk_field int, $constrs) engine innodb;
+--echo Parsing foreign keys 4...
+eval create table t1($fk_ref int primary key, $fk_field int, $constrs) engine innodb;
+drop table t1;
+--enable_query_log
--echo # End of 10.2 tests
diff --git a/mysql-test/suite/innodb/t/innodb-virtual-columns2.test b/mysql-test/suite/innodb/t/innodb-virtual-columns2.test
index 474a6354576..13ecffcc896 100644
--- a/mysql-test/suite/innodb/t/innodb-virtual-columns2.test
+++ b/mysql-test/suite/innodb/t/innodb-virtual-columns2.test
@@ -52,3 +52,23 @@ SELECT * FROM t1 WHERE y BETWEEN 2012 AND 2016 FOR UPDATE;
INSERT INTO t1 (i) VALUES (1),(2);
SELECT * FROM t1 WHERE y BETWEEN 2012 AND 2016 FOR UPDATE;
DROP TABLE t1;
+
+--echo #
+--echo # MDEV-23632 ALTER TABLE...ADD KEY creates corrupted index on virtual column
+--echo #
+
+CREATE TABLE t1(a INT PRIMARY KEY, b INT, g INT GENERATED ALWAYS AS(b)VIRTUAL) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1,1,default);
+ALTER TABLE t1 ADD COLUMN c INT;
+ALTER TABLE t1 ADD KEY(g);
+CHECK TABLE t1;
+SELECT g FROM t1 FORCE INDEX (g);
+DROP TABLE t1;
+
+CREATE TABLE t1(a INT, b INT, g INT GENERATED ALWAYS AS(b)VIRTUAL) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1,1,default);
+ALTER TABLE t1 ADD COLUMN c INT PRIMARY KEY; # Triggers `new_clustered`
+ALTER TABLE t1 ADD KEY(g);
+CHECK TABLE t1;
+SELECT g FROM t1 FORCE INDEX (g);
+DROP TABLE t1;
diff --git a/mysql-test/suite/innodb/t/innodb.test b/mysql-test/suite/innodb/t/innodb.test
index a81e6c3f900..ba5126e4757 100644
--- a/mysql-test/suite/innodb/t/innodb.test
+++ b/mysql-test/suite/innodb/t/innodb.test
@@ -2592,3 +2592,21 @@ SELECT * FROM t2;
DROP TABLE t1;
DROP TABLE t2;
+
+--echo #
+--echo # MDEV-24748 Extern field check missing
+--echo # in btr_index_rec_validate()
+--echo #
+CREATE TABLE t1 (pk INT, c1 char(255),
+c2 char(255), c3 char(255), c4 char(255),
+c5 char(255), c6 char(255), c7 char(255),
+c8 char(255), primary key (pk)
+) CHARACTER SET utf32 ENGINE=InnoDB;
+
+INSERT INTO t1 VALUES
+ (1, 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'),
+ (2, 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p');
+CHECK TABLE t1;
+ALTER TABLE t1 FORCE;
+# Cleanup
+DROP TABLE t1;
diff --git a/mysql-test/suite/innodb/t/innodb_buffer_pool_fail.test b/mysql-test/suite/innodb/t/innodb_buffer_pool_fail.test
new file mode 100644
index 00000000000..1d938e12e78
--- /dev/null
+++ b/mysql-test/suite/innodb/t/innodb_buffer_pool_fail.test
@@ -0,0 +1,11 @@
+--source include/have_innodb.inc
+--source include/have_debug.inc
+call mtr.add_suppression("InnoDB: Cannot allocate memory for the buffer pool");
+call mtr.add_suppression("InnoDB: Plugin initialization aborted at srv0start.cc.*");
+call mtr.add_suppression("Plugin 'InnoDB' init function returned error.");
+call mtr.add_suppression("Plugin 'InnoDB' registration as a STORAGE ENGINE failed.");
+--echo #
+--echo # MDEV-25019 memory allocation failures during startup cause server failure in different, confusing ways
+--echo #
+let restart_parameters=--debug_dbug="+d,ib_buf_chunk_init_fails";
+--source include/restart_mysqld.inc
diff --git a/mysql-test/suite/innodb/t/innodb_bug60049-master.opt b/mysql-test/suite/innodb/t/innodb_bug60049-master.opt
deleted file mode 100644
index 22a5d4ed221..00000000000
--- a/mysql-test/suite/innodb/t/innodb_bug60049-master.opt
+++ /dev/null
@@ -1 +0,0 @@
---innodb_fast_shutdown=0
diff --git a/mysql-test/suite/innodb/t/innodb_bug60049.test b/mysql-test/suite/innodb/t/innodb_bug60049.test
deleted file mode 100644
index cb05ca297ea..00000000000
--- a/mysql-test/suite/innodb/t/innodb_bug60049.test
+++ /dev/null
@@ -1,49 +0,0 @@
-# Bug #60049 Verify that purge leaves no garbage in unique secondary indexes
-# This test requires a fresh server start-up and a slow shutdown.
-# This was a suspected bug (not a bug).
-
--- source include/not_embedded.inc
--- source include/have_innodb.inc
--- source include/have_innodb_16k.inc
-
--- disable_query_log
-call mtr.add_suppression('\\[ERROR\\] InnoDB: Table `mysql`.`innodb_(table|index)_stats` not found');
-call mtr.add_suppression('\\[ERROR\\] InnoDB: Fetch of persistent statistics requested for table `mysql`.`gtid_executed`');
-
-let $create1 = query_get_value(SHOW CREATE TABLE mysql.innodb_table_stats, Create Table, 1);
-let $create2 = query_get_value(SHOW CREATE TABLE mysql.innodb_index_stats, Create Table, 1);
-DROP TABLE mysql.innodb_index_stats;
-DROP TABLE mysql.innodb_table_stats;
--- enable_query_log
-
-CREATE TABLE t(a INT)ENGINE=InnoDB STATS_PERSISTENT=0;
-RENAME TABLE t TO u;
-DROP TABLE u;
-SELECT @@innodb_fast_shutdown;
-let $MYSQLD_DATADIR=`select @@datadir`;
-
---source include/shutdown_mysqld.inc
-
-# Check the tail of ID_IND (SYS_TABLES.ID)
-let IBDATA1=$MYSQLD_DATADIR/ibdata1;
-perl;
-my $file = $ENV{'IBDATA1'};
-open(FILE, "<$file") || die "Unable to open $file";
-# Read DICT_HDR_TABLE_IDS, the root page number of ID_IND (SYS_TABLES.ID).
-seek(FILE, 7*16384+38+36, 0) || die "Unable to seek $file";
-die unless read(FILE, $_, 4) == 4;
-my $sys_tables_id_root = unpack("N", $_);
-print "Last record of ID_IND root page ($sys_tables_id_root):\n";
-# This should be the last record in ID_IND. Dump it in hexadecimal.
-seek(FILE, $sys_tables_id_root*16384 + 152, 0) || die "Unable to seek $file";
-read(FILE, $_, 32) || die "Unable to read $file";
-close(FILE);
-print unpack("H*", $_), "\n";
-EOF
-
---source include/start_mysqld.inc
-
--- disable_query_log
-USE mysql;
-eval $create1;
-eval $create2;
diff --git a/mysql-test/suite/innodb/t/innodb_multi_update.test b/mysql-test/suite/innodb/t/innodb_multi_update.test
index 8d5283a9ed5..74a7aea7d13 100644
--- a/mysql-test/suite/innodb/t/innodb_multi_update.test
+++ b/mysql-test/suite/innodb/t/innodb_multi_update.test
@@ -35,4 +35,5 @@ CREATE TABLE t1(f1 INT) ENGINE=INNODB;
INSERT INTO t1 VALUES(1);
--error ER_OPERAND_COLUMNS
UPDATE (SELECT ((SELECT 1 FROM t1), 1) FROM t1 WHERE (SELECT 1 FROM t1)) x, (SELECT 1) AS d SET d.f1 = 1;
+UPDATE (SELECT ((SELECT 1 FROM t1),1) = (1,1) FROM t1 WHERE (SELECT 1 FROM t1)) x, t1 AS d SET d.f1 = 1;
DROP TABLE t1;
diff --git a/mysql-test/suite/innodb/t/innodb_simulate_comp_failures-master.opt b/mysql-test/suite/innodb/t/innodb_simulate_comp_failures-master.opt
deleted file mode 100644
index 39b205c9b68..00000000000
--- a/mysql-test/suite/innodb/t/innodb_simulate_comp_failures-master.opt
+++ /dev/null
@@ -1,2 +0,0 @@
---innodb-file-per-table
---skip-innodb-doublewrite
diff --git a/mysql-test/suite/innodb/t/innodb_simulate_comp_failures.test b/mysql-test/suite/innodb/t/innodb_simulate_comp_failures.test
deleted file mode 100644
index 5a4978c9b37..00000000000
--- a/mysql-test/suite/innodb/t/innodb_simulate_comp_failures.test
+++ /dev/null
@@ -1,9 +0,0 @@
---source include/big_test.inc
-# test takes too long with valgrind
---source include/not_valgrind.inc
---source include/have_debug.inc
---let $num_inserts = 1500
---let $num_ops = 3500
---source suite/innodb/include/innodb_simulate_comp_failures.inc
-# clean exit
---exit
diff --git a/mysql-test/suite/innodb/t/innodb_simulate_comp_failures_small-master.opt b/mysql-test/suite/innodb/t/innodb_simulate_comp_failures_small-master.opt
deleted file mode 100644
index fae32059249..00000000000
--- a/mysql-test/suite/innodb/t/innodb_simulate_comp_failures_small-master.opt
+++ /dev/null
@@ -1,2 +0,0 @@
---innodb-file-per-table
-
diff --git a/mysql-test/suite/innodb/t/innodb_simulate_comp_failures_small.test b/mysql-test/suite/innodb/t/innodb_simulate_comp_failures_small.test
deleted file mode 100644
index 79a16d36917..00000000000
--- a/mysql-test/suite/innodb/t/innodb_simulate_comp_failures_small.test
+++ /dev/null
@@ -1,8 +0,0 @@
---source include/have_debug.inc
---source include/not_valgrind.inc
-
---let $num_inserts = 1000
---let $num_ops = 30
---source suite/innodb/include/innodb_simulate_comp_failures.inc
-# clean exit
---exit
diff --git a/mysql-test/suite/innodb/t/instant_alter.test b/mysql-test/suite/innodb/t/instant_alter.test
index 83dca4cb5a6..3872f080d6b 100644
--- a/mysql-test/suite/innodb/t/instant_alter.test
+++ b/mysql-test/suite/innodb/t/instant_alter.test
@@ -1,4 +1,5 @@
--source include/innodb_page_size.inc
+--source include/have_sequence.inc
--echo #
--echo # MDEV-11369: Instant ADD COLUMN for InnoDB
@@ -903,3 +904,36 @@ CREATE INDEX i1 ON t1(a) COMMENT 'comment1';
ALTER TABLE t1 DROP INDEX i1, ADD INDEX i1(a) COMMENT 'comment2', ALGORITHM=INSTANT;
SHOW CREATE TABLE t1;
DROP TABLE t1;
+
+--echo #
+--echo # MDEV-25057 Assertion `n_fields < dtuple_get_n_fields(entry)'
+--echo # failed in dtuple_convert_big_rec
+--echo #
+CREATE TABLE t1 (pk INT PRIMARY KEY AUTO_INCREMENT,
+ a CHAR(255) NOT NULL,
+ b CHAR(255) NOT NULL, c INT) ENGINE=InnoDB CHARSET utf32;
+ALTER TABLE t1 DROP c;
+INSERT INTO t1(a, b) SELECT '', '' FROM seq_1_to_16;
+SELECT COUNT(*) FROM t1;
+# Cleanup
+DROP TABLE t1;
+
+--echo #
+--echo # MDEV-25630 Rollback of instant operation adds wrong
+--echo # column to secondary index
+--echo #
+CREATE TABLE t1 (f1 INT, f2 INT, f3 INT, f4 INT,
+ PRIMARY KEY(f1, f4),
+ KEY(f2))ENGINE=InnoDB;
+CREATE TABLE t2 (f1 INT, f2 INT, PRIMARY KEY(f1),
+ FOREIGN KEY fk (f2) REFERENCES t2(f1)
+ )ENGINE=InnoDB;
+
+ALTER TABLE t1 ADD f5 INT;
+SET FOREIGN_KEY_CHECKS=0;
+--error ER_FK_FAIL_ADD_SYSTEM
+ALTER TABLE t1 DROP COLUMN f3, ADD FOREIGN KEY fk (f1)
+ REFERENCES x(x);
+ALTER TABLE t1 DROP COLUMN f5;
+SHOW CREATE TABLE t1;
+DROP TABLE t1, t2;
diff --git a/mysql-test/suite/innodb/t/instant_alter_charset.test b/mysql-test/suite/innodb/t/instant_alter_charset.test
index a5ddd49830c..1d444b88a7f 100644
--- a/mysql-test/suite/innodb/t/instant_alter_charset.test
+++ b/mysql-test/suite/innodb/t/instant_alter_charset.test
@@ -843,3 +843,17 @@ ALTER TABLE t1 MODIFY a VARCHAR(2)
CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;
INSERT INTO t1 VALUES ('a');
DROP TABLE t1;
+
+
+--echo #
+--echo # MDEV-22775 [HY000][1553] Changing name of primary key column with foreign key constraint fails.
+--echo #
+
+create table t1 (id int primary key) engine=innodb default charset=utf8;
+create table t2 (input_id int primary key, id int not null,
+ key a (id),
+ constraint a foreign key (id) references t1 (id)
+)engine=innodb default charset=utf8;
+alter table t1 change id id2 int;
+drop table t2;
+drop table t1;
diff --git a/mysql-test/suite/innodb/t/instant_alter_debug.combinations b/mysql-test/suite/innodb/t/instant_alter_debug.combinations
new file mode 100644
index 00000000000..f3bc2cc0c25
--- /dev/null
+++ b/mysql-test/suite/innodb/t/instant_alter_debug.combinations
@@ -0,0 +1,4 @@
+[redundant]
+innodb_default_row_format=redundant
+[dynamic]
+innodb_default_row_format=dynamic
diff --git a/mysql-test/suite/innodb/t/instant_alter_debug.test b/mysql-test/suite/innodb/t/instant_alter_debug.test
index fe80de2ca51..b93b9dd8f1b 100644
--- a/mysql-test/suite/innodb/t/instant_alter_debug.test
+++ b/mysql-test/suite/innodb/t/instant_alter_debug.test
@@ -1,6 +1,7 @@
--source include/have_innodb.inc
--source include/have_debug.inc
--source include/have_debug_sync.inc
+--source include/have_sequence.inc
SET @save_frequency= @@GLOBAL.innodb_purge_rseg_truncate_frequency;
SET GLOBAL innodb_purge_rseg_truncate_frequency=1;
@@ -216,6 +217,15 @@ CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB;
INSERT INTO t1 SET a=0;
ALTER TABLE t1 ADD COLUMN b INT NOT NULL DEFAULT 2, ADD COLUMN c INT;
+DELIMITER $$;
+BEGIN NOT ATOMIC
+ DECLARE c TEXT DEFAULT(SELECT CONCAT('ALTER TABLE t1 ADD (c',
+ GROUP_CONCAT(seq SEPARATOR ' INT, c'), ' INT), ALGORITHM=INSTANT;') FROM seq_1_to_130);
+ EXECUTE IMMEDIATE c;
+END;
+$$
+DELIMITER ;$$
+
connection stop_purge;
START TRANSACTION WITH CONSISTENT SNAPSHOT;
@@ -233,6 +243,7 @@ connection default;
SET DEBUG_SYNC = 'now WAIT_FOR copied';
let $wait_all_purged = 1;
--source include/wait_all_purged.inc
+let $wait_all_purged = 0;
INSERT INTO t1 SET a=1;
INSERT INTO t1 SET a=2,b=3,c=4;
SET DEBUG_SYNC = 'now SIGNAL logged';
@@ -242,7 +253,7 @@ reap;
connection default;
SET DEBUG_SYNC = RESET;
-SELECT * FROM t1;
+SELECT a, b, c FROM t1;
ALTER TABLE t1 DROP b, ALGORITHM=INSTANT;
connection stop_purge;
START TRANSACTION WITH CONSISTENT SNAPSHOT;
@@ -260,6 +271,7 @@ connection default;
SET DEBUG_SYNC = 'now WAIT_FOR copied';
let $wait_all_purged = 1;
--source include/wait_all_purged.inc
+let $wait_all_purged = 0;
INSERT INTO t1 SET a=1;
INSERT INTO t1 SET a=2,c=4;
SET DEBUG_SYNC = 'now SIGNAL logged';
@@ -270,7 +282,7 @@ UPDATE t1 SET b = b + 1 WHERE a = 2;
connection default;
SET DEBUG_SYNC = RESET;
-SELECT * FROM t1;
+SELECT a, b, c FROM t1;
--echo #
--echo # MDEV-15872 Crash in online ALTER TABLE...ADD PRIMARY KEY
@@ -297,7 +309,7 @@ disconnect ddl;
connection default;
SET DEBUG_SYNC = RESET;
-SELECT * FROM t1;
+SELECT a, b, c, d FROM t1;
DROP TABLE t1;
--echo #
@@ -326,44 +338,6 @@ SELECT * FROM t1;
DROP TABLE t1;
--echo #
---echo # MDEV-17899 Assertion failures on rollback of instant ADD/DROP
---echo # MDEV-18098 Crash after rollback of instant DROP COLUMN
---echo #
-
-SET @save_dbug = @@SESSION.debug_dbug;
-SET debug_dbug='+d,ib_commit_inplace_fail_1';
-CREATE TABLE t1 (a int, b int) ENGINE=InnoDB;
-INSERT INTO t1 VALUES (1,2);
---error ER_INTERNAL_ERROR
-ALTER TABLE t1 DROP COLUMN b;
---error ER_INTERNAL_ERROR
-ALTER TABLE t1 DROP COLUMN b;
---error ER_INTERNAL_ERROR
-ALTER TABLE t1 ADD COLUMN c INT;
-SELECT * FROM t1;
-DROP TABLE t1;
-
-CREATE TABLE t1 (a int, b int) ENGINE=InnoDB;
---error ER_INTERNAL_ERROR
-ALTER TABLE t1 ADD COLUMN c INT;
-BEGIN;
-INSERT INTO t1 VALUES(1, 1);
-ROLLBACK;
---error ER_INTERNAL_ERROR
-ALTER TABLE t1 DROP COLUMN b;
-INSERT INTO t1 values (1,1);
-SELECT * FROM t1;
-DROP TABLE t1;
-
-SET debug_dbug = @save_dbug;
-
-SELECT variable_value-@old_instant instants
-FROM information_schema.global_status
-WHERE variable_name = 'innodb_instant_alter_column';
-
-SET GLOBAL innodb_purge_rseg_truncate_frequency = @save_frequency;
-
---echo #
--echo # MDEV-21045 AddressSanitizer: use-after-poison in mem_heap_dup / row_log_table_get_pk_col
--echo #
CREATE TABLE t1 (a TEXT) ENGINE = InnoDB ROW_FORMAT=REDUNDANT;
@@ -414,3 +388,134 @@ SELECT * FROM t1;
SET DEBUG_SYNC='RESET';
--disconnect con2
DROP TABLE t1;
+
+--echo #
+--echo # MDEV-24653 Assertion block->page.id.page_no() == index->page failed
+--echo # in innobase_add_instant_try()
+--echo #
+
+SET @saved_limit = @@GLOBAL.innodb_limit_optimistic_insert_debug;
+SET GLOBAL innodb_limit_optimistic_insert_debug = 2;
+
+CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1),(2),(3),(4);
+ALTER TABLE t1 ADD COLUMN b INT;
+DELETE FROM t1;
+--source include/wait_all_purged.inc
+ALTER TABLE t1 ADD COLUMN c INT;
+
+SELECT * FROM t1;
+DROP TABLE t1;
+SET GLOBAL innodb_limit_optimistic_insert_debug = @saved_limit;
+
+--echo #
+--echo # MDEV-24796 Assertion page_has_next... failed
+--echo # in btr_pcur_store_position()
+--echo #
+
+CREATE TABLE t1 (c INT KEY) ENGINE=InnoDB;
+INSERT INTO t1 VALUES(1),(2);
+SET GLOBAL innodb_limit_optimistic_insert_debug=2;
+ALTER TABLE t1 ADD COLUMN d INT;
+DELETE FROM t1;
+--source include/wait_all_purged.inc
+SELECT * FROM t1 WHERE c<>1 ORDER BY c DESC;
+DROP TABLE t1;
+
+SET GLOBAL innodb_limit_optimistic_insert_debug = @saved_limit;
+
+--echo #
+--echo # MDEV-24620 ASAN heap-buffer-overflow in btr_pcur_restore_position()
+--echo #
+
+CREATE TABLE t1 (a VARCHAR(1) PRIMARY KEY) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1);
+connect (stop_purge,localhost,root,,);
+START TRANSACTION WITH CONSISTENT SNAPSHOT;
+
+connection default;
+ALTER TABLE t1 ADD c INT;
+BEGIN;
+DELETE FROM t1;
+
+connect (dml,localhost,root,,test);
+SET DEBUG_SYNC='row_mysql_handle_errors SIGNAL s1 WAIT_FOR s2';
+send UPDATE t1 SET c=1;
+
+connection default;
+SET DEBUG_SYNC='now WAIT_FOR s1';
+COMMIT;
+
+connection stop_purge;
+COMMIT;
+disconnect stop_purge;
+
+connection default;
+--source include/wait_all_purged.inc
+SET DEBUG_SYNC='now SIGNAL s2';
+
+connection dml;
+reap;
+disconnect dml;
+
+connection default;
+SET DEBUG_SYNC=RESET;
+DROP TABLE t1;
+
+--echo # End of 10.3 tests
+
+--echo #
+--echo # MDEV-17899 Assertion failures on rollback of instant ADD/DROP
+--echo # MDEV-18098 Crash after rollback of instant DROP COLUMN
+--echo #
+
+SET @save_dbug = @@SESSION.debug_dbug;
+SET debug_dbug='+d,ib_commit_inplace_fail_1';
+CREATE TABLE t1 (a int, b int) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1,2);
+--error ER_INTERNAL_ERROR
+ALTER TABLE t1 DROP COLUMN b;
+--error ER_INTERNAL_ERROR
+ALTER TABLE t1 DROP COLUMN b;
+--error ER_INTERNAL_ERROR
+ALTER TABLE t1 ADD COLUMN c INT;
+SELECT * FROM t1;
+DROP TABLE t1;
+
+CREATE TABLE t1 (a int, b int) ENGINE=InnoDB;
+--error ER_INTERNAL_ERROR
+ALTER TABLE t1 ADD COLUMN c INT;
+BEGIN;
+INSERT INTO t1 VALUES(1, 1);
+ROLLBACK;
+--error ER_INTERNAL_ERROR
+ALTER TABLE t1 DROP COLUMN b;
+INSERT INTO t1 values (1,1);
+SELECT * FROM t1;
+DROP TABLE t1;
+
+SET debug_dbug = @save_dbug;
+
+--echo #
+--echo # MDEV-24512 Assertion failed in rec_is_metadata()
+--echo # in btr_discard_only_page_on_level()
+--echo #
+
+SET @save_limit= @@GLOBAL.innodb_limit_optimistic_insert_debug;
+SET GLOBAL innodb_limit_optimistic_insert_debug=2;
+CREATE TABLE t1 (c CHAR(1) UNIQUE) ENGINE=InnoDB;
+
+ALTER TABLE t1 ADD c2 INT NOT NULL DEFAULT 0 FIRST;
+--error ER_DUP_ENTRY
+INSERT INTO t1 (c) VALUES ('x'),('d'),('r'),('f'),('y'),('u'),('m'),('d');
+SET GLOBAL innodb_limit_optimistic_insert_debug=@save_limit;
+SELECT * FROM t1;
+DROP TABLE t1;
+
+--echo # End of 10.4 tests
+
+SET GLOBAL innodb_purge_rseg_truncate_frequency = @save_frequency;
+
+SELECT variable_value-@old_instant instants
+FROM information_schema.global_status
+WHERE variable_name = 'innodb_instant_alter_column';
diff --git a/mysql-test/suite/innodb/t/mvcc_secondary.test b/mysql-test/suite/innodb/t/mvcc_secondary.test
new file mode 100644
index 00000000000..93c91c40076
--- /dev/null
+++ b/mysql-test/suite/innodb/t/mvcc_secondary.test
@@ -0,0 +1,26 @@
+--source include/innodb_page_size_small.inc
+
+--echo #
+--echo # MDEV-25459 MVCC read from index on CHAR or VARCHAR wrongly omits rows
+--echo #
+
+CREATE TABLE t1 (
+ pk int PRIMARY KEY, c varchar(255) UNIQUE,
+ d char(255), e varchar(255), f char(255), g char(255)
+) ENGINE=InnoDB ROW_FORMAT=DYNAMIC DEFAULT CHARACTER SET ucs2;
+
+INSERT INTO t1 VALUES
+(1,REPEAT('c',248),REPEAT('a',106),REPEAT('b',220),REPEAT('x',14),'');
+
+BEGIN;
+UPDATE t1 SET c=REPEAT('d',170);
+
+connect (con1,localhost,root,,);
+SELECT pk FROM t1 FORCE INDEX (c);
+connection default;
+COMMIT;
+connection con1;
+SELECT pk FROM t1 FORCE INDEX (c);
+disconnect con1;
+connection default;
+DROP TABLE t1;
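The new test above exercises InnoDB's consistent (MVCC) reads through a secondary index on CHAR/VARCHAR columns: a reader whose snapshot predates the concurrent UPDATE must still find the row through the old index entry rather than omitting it. A generic sketch of the same expectation under the default REPEATABLE READ isolation; the table and values are illustrative, not taken from the patch:

-- session 1
CREATE TABLE mvcc_demo (pk INT PRIMARY KEY, c VARCHAR(10), UNIQUE KEY (c)) ENGINE=InnoDB;
INSERT INTO mvcc_demo VALUES (1, 'old');
BEGIN;
UPDATE mvcc_demo SET c = 'new' WHERE pk = 1;  -- not yet committed

-- session 2
SELECT pk FROM mvcc_demo FORCE INDEX (c);     -- must return 1, read through the old index entry

-- session 1
COMMIT;
DROP TABLE mvcc_demo;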
diff --git a/mysql-test/suite/innodb/t/temporary_table.test b/mysql-test/suite/innodb/t/temporary_table.test
index 8e3ddf95634..13c203b587b 100644
--- a/mysql-test/suite/innodb/t/temporary_table.test
+++ b/mysql-test/suite/innodb/t/temporary_table.test
@@ -502,3 +502,110 @@ SET FOREIGN_KEY_CHECKS = 0;
--error ER_CANT_CREATE_TABLE
CREATE TEMPORARY TABLE t1(f1 INT NOT NULL,
FOREIGN KEY(f1) REFERENCES t0(f1))ENGINE=InnoDB;
+
+CREATE TABLE t (c INT) ENGINE=InnoDB;
+INSERT INTO t VALUES(0);
+CREATE TEMPORARY TABLE t2 (c INT) ENGINE=InnoDB;
+START TRANSACTION READ ONLY;
+INSERT INTO t2 SELECT * FROM t;
+COMMIT;
+DROP TABLE t, t2;
+
+CREATE TEMPORARY TABLE t (c INT,c2 INT) ENGINE=InnoDB;
+START TRANSACTION READ ONLY;
+--error ER_WRONG_VALUE_COUNT_ON_ROW
+INSERT INTO t VALUES(0);
+SAVEPOINT s;
+INSERT INTO t VALUES(0,0);
+COMMIT;
+DROP TABLE t;
+
+CREATE TEMPORARY TABLE t (c INT,c2 INT) ENGINE=InnoDB;
+START TRANSACTION READ ONLY;
+--error ER_WRONG_VALUE_COUNT_ON_ROW
+INSERT INTO t VALUES(0);
+SAVEPOINT s;
+INSERT INTO t VALUES(0,0);
+ROLLBACK;
+DROP TABLE t;
+
+CREATE TEMPORARY TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1);
+START TRANSACTION READ ONLY;
+UPDATE t1 SET a= 2;
+COMMIT;
+DROP TABLE t1;
+
+CREATE TEMPORARY TABLE t(c INT) ENGINE=InnoDB;
+SET SESSION tx_read_only=TRUE;
+LOCK TABLE test.t READ;
+SELECT * FROM t;
+INSERT INTO t VALUES(0xADC3);
+SET SESSION tx_read_only=FALSE;
+DROP TABLE t;
+
+CREATE TEMPORARY TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1);
+START TRANSACTION READ ONLY;
+UPDATE t1 SET a= 2;
+COMMIT;
+DROP TABLE t1;
+
+CREATE TEMPORARY TABLE t1 (a INT PRIMARY KEY, b int) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1, 1);
+START TRANSACTION READ ONLY;
+UPDATE t1 SET b= 2;
+COMMIT;
+DROP TABLE t1;
+
+CREATE TEMPORARY TABLE t1 (a INT PRIMARY KEY, b int, c varchar(255)) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1, 1, repeat('a', 200));
+START TRANSACTION READ ONLY;
+UPDATE t1 SET b= 2, c=repeat('a', 250);
+COMMIT;
+DROP TABLE t1;
+
+CREATE TEMPORARY TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1);
+START TRANSACTION READ ONLY;
+UPDATE t1 SET a= 2;
+ROLLBACK;
+DROP TABLE t1;
+
+CREATE TEMPORARY TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1);
+START TRANSACTION READ ONLY;
+DELETE FROM t1 WHERE a= 2;
+COMMIT;
+DROP TABLE t1;
+
+CREATE TEMPORARY TABLE tmp (a INT) ENGINE=InnoDB;
+INSERT INTO tmp () VALUES (),();
+SET TX_READ_ONLY= 1;
+INSERT INTO tmp SELECT * FROM tmp;
+SET TX_READ_ONLY= 0;
+DROP TABLE tmp;
+
+SET sql_mode='';
+SET GLOBAL tx_read_only=TRUE;
+CREATE TEMPORARY TABLE t (c INT);
+SET SESSION tx_read_only=DEFAULT;
+INSERT INTO t VALUES(1);
+INSERT INTO t SELECT * FROM t;
+SET SESSION tx_read_only=FALSE;
+SET GLOBAL tx_read_only=OFF;
+DROP TABLE t;
+
+CREATE TEMPORARY TABLE t(a INT);
+SET SESSION tx_read_only=ON;
+LOCK TABLE t READ;
+SELECT COUNT(*)FROM t;
+INSERT INTO t VALUES (0);
+SET SESSION tx_read_only=OFF;
+DROP TABLE t;
+
+CREATE TEMPORARY TABLE t (a INT) ENGINE=InnoDB;
+INSERT INTO t VALUES (1);
+START TRANSACTION READ ONLY;
+UPDATE t SET a = NULL;
+ROLLBACK;
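The temporary-table cases added above all revolve around one rule: START TRANSACTION READ ONLY (and tx_read_only) restricts changes only to tables visible to other transactions, so session-private TEMPORARY tables remain writable. A minimal sketch of that rule; the table name is illustrative and not part of the patch:

CREATE TEMPORARY TABLE ro_demo (a INT) ENGINE=InnoDB;
START TRANSACTION READ ONLY;
INSERT INTO ro_demo VALUES (1);  -- allowed: the temporary table is invisible to other transactions
UPDATE ro_demo SET a = 2;
COMMIT;
DROP TABLE ro_demo;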
diff --git a/mysql-test/suite/innodb/t/truncate_foreign.test b/mysql-test/suite/innodb/t/truncate_foreign.test
index d9d647e69f0..1c150e5db40 100644
--- a/mysql-test/suite/innodb/t/truncate_foreign.test
+++ b/mysql-test/suite/innodb/t/truncate_foreign.test
@@ -67,3 +67,16 @@ connection default;
SET DEBUG_SYNC = RESET;
DROP TABLE child, parent;
+
+--echo #
+--echo # MDEV-24532 Table corruption ER_NO_SUCH_TABLE_IN_ENGINE or
+--echo # ER_CRASHED_ON_USAGE after ALTER on table with foreign key
+--echo #
+
+CREATE TABLE t1 (a INT, b INT, PRIMARY KEY (a)) ENGINE=InnoDB;
+ALTER TABLE t1 ADD FOREIGN KEY (b) REFERENCES t1 (a) ON UPDATE CASCADE;
+LOCK TABLE t1 WRITE;
+TRUNCATE TABLE t1;
+ALTER TABLE t1 ADD c INT;
+UNLOCK TABLES;
+DROP TABLE t1;
diff --git a/mysql-test/suite/innodb_fts/r/create.result b/mysql-test/suite/innodb_fts/r/create.result
index 55c5c45f643..3ca24f5253d 100644
--- a/mysql-test/suite/innodb_fts/r/create.result
+++ b/mysql-test/suite/innodb_fts/r/create.result
@@ -178,3 +178,13 @@ Table Op Msg_type Msg_text
test.t1 optimize status OK
DROP TABLE t1;
SET GLOBAL innodb_optimize_fulltext_only= @optimize_fulltext.save;
+#
+# MDEV-24403 Segfault on CREATE TABLE with explicit FTS_DOC_ID_INDEX by multiple fields
+#
+create table t1 (
+f1 int, f2 text,
+FTS_DOC_ID bigint unsigned not null,
+unique key FTS_DOC_ID_INDEX(FTS_DOC_ID, f1),
+fulltext (f2))
+engine=innodb;
+ERROR 42000: Incorrect index name 'FTS_DOC_ID_INDEX'
diff --git a/mysql-test/suite/innodb_fts/r/fulltext.result b/mysql-test/suite/innodb_fts/r/fulltext.result
index 0e30dd0be05..abcdebca01d 100644
--- a/mysql-test/suite/innodb_fts/r/fulltext.result
+++ b/mysql-test/suite/innodb_fts/r/fulltext.result
@@ -689,3 +689,33 @@ FTS_DOC_ID t
2 foo bar
3 foo
DROP TABLE t;
+#
+# MDEV-25295 Aborted FTS_DOC_ID_INDEX considered as
+# existing FTS_DOC_ID_INDEX during DDL
+#
+SET sql_mode='';
+CREATE TABLE t1 (FTS_DOC_ID BIGINT UNSIGNED NOT NULL,title CHAR(1),body TEXT)engine=innodb;
+INSERT INTO t1 (FTS_DOC_ID,title,body)VALUES(1,0,0), (1,0,0);
+CREATE FULLTEXT INDEX idx1 ON t1 (title,body);
+ERROR 23000: Duplicate entry '' for key '*UNKNOWN*'
+CREATE FULLTEXT INDEX idx1 ON t1 (title,body);
+ERROR 23000: Duplicate entry '' for key '*UNKNOWN*'
+DROP TABLE t1;
+SET sql_mode = DEFAULT;
+#
+# MDEV-25070 SIGSEGV in fts_create_in_mem_aux_table
+#
+CREATE TABLE t1 (a CHAR, FULLTEXT KEY(a)) ENGINE=InnoDB;
+ALTER TABLE t1 DISCARD TABLESPACE;
+ALTER TABLE t1 ADD FULLTEXT INDEX (a);
+Warnings:
+Warning 1814 Tablespace has been discarded for table `t1`
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` char(1) DEFAULT NULL,
+ FULLTEXT KEY `a` (`a`),
+ FULLTEXT KEY `a_2` (`a`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+DROP TABLE t1;
+# End of 10.3 tests
diff --git a/mysql-test/suite/innodb_fts/r/innodb-fts-ddl.result b/mysql-test/suite/innodb_fts/r/innodb-fts-ddl.result
index 7d98aba7fc3..b50bf047265 100644
--- a/mysql-test/suite/innodb_fts/r/innodb-fts-ddl.result
+++ b/mysql-test/suite/innodb_fts/r/innodb-fts-ddl.result
@@ -232,6 +232,38 @@ CREATE TABLE t1 (FTS_DOC_ID BIGINT UNSIGNED PRIMARY KEY,
f1 VARCHAR(200),FULLTEXT fidx(f1))engine=innodb;
ALTER TABLE t1 DROP index fidx, ADD FULLTEXT INDEX(f1);
DROP TABLE t1;
+#
+# MDEV-21478 Inplace alter fails to report error when
+# FTS_DOC_ID is added
+SET NAMES utf8;
+CREATE TABLE t1(f1 INT NOT NULL)ENGINE=InnoDB;
+ALTER TABLE t1 ADD FTS_DOC_ıD BIGINT UNSIGNED NOT NULL, ALGORITHM=COPY;
+ALTER TABLE t1 DROP COLUMN FTS_DOC_ıD;
+ALTER TABLE t1 ADD FTS_DOC_ıD BIGINT UNSIGNED NOT NULL, ALGORITHM=INPLACE;
+DROP TABLE t1;
+CREATE TABLE t1 (f1 INT NOT NULL)ENGINE=InnoDB;
+ALTER TABLE t1 ADD FTS_DOC_İD BIGINT UNSIGNED NOT NULL, ALGORITHM=INPLACE;
+ERROR 42000: Incorrect column name 'FTS_DOC_İD'
+ALTER TABLE t1 ADD FTS_DOC_İD BIGINT UNSIGNED NOT NULL, ALGORITHM=COPY;
+ERROR 42000: Incorrect column name 'FTS_DOC_İD'
+ALTER TABLE t1 ADD fts_doc_id INT, ALGORITHM=COPY;
+ERROR 42000: Incorrect column name 'fts_doc_id'
+ALTER TABLE t1 ADD fts_doc_id INT, ALGORITHM=INPLACE;
+ERROR 42000: Incorrect column name 'fts_doc_id'
+ALTER TABLE t1 ADD fts_doc_id BIGINT UNSIGNED NOT NULL, ALGORITHM=COPY;
+ERROR 42000: Incorrect column name 'fts_doc_id'
+ALTER TABLE t1 ADD fts_doc_id BIGINT UNSIGNED NOT NULL, ALGORITHM=INPLACE;
+ERROR 42000: Incorrect column name 'fts_doc_id'
+ALTER TABLE t1 ADD FTS_DOC_ID INT UNSIGNED NOT NULL, ALGORITHM=COPY;
+ERROR 42000: Incorrect column name 'FTS_DOC_ID'
+ALTER TABLE t1 ADD FTS_DOC_ID INT UNSIGNED NOT NULL, ALGORITHM=INPLACE;
+ERROR 42000: Incorrect column name 'FTS_DOC_ID'
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `f1` int(11) NOT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+DROP TABLE t1;
CREATE TABLE t1 (a VARCHAR(3)) ENGINE=InnoDB;
ALTER TABLE t1 ADD FULLTEXT KEY(a), ADD COLUMN b VARCHAR(3), ADD FULLTEXT KEY(b);
DROP TABLE t1;
@@ -244,3 +276,4 @@ CREATE TABLE t1
ENGINE=InnoDB;
ALTER TABLE t1 ADD c SERIAL;
DROP TABLE t1;
+# End of 10.3 tests
diff --git a/mysql-test/suite/innodb_fts/r/misc_debug.result b/mysql-test/suite/innodb_fts/r/misc_debug.result
index f1110797f33..10e3cf8874d 100644
--- a/mysql-test/suite/innodb_fts/r/misc_debug.result
+++ b/mysql-test/suite/innodb_fts/r/misc_debug.result
@@ -26,3 +26,29 @@ SET DEBUG_DBUG="+d,fts_instrument_sync";
INSERT INTO t1 VALUES(1, "mariadb");
ALTER TABLE t1 FORCE;
DROP TABLE t2, t1;
+#
+# MDEV-25200 Index count mismatch due to aborted FULLTEXT INDEX
+#
+CREATE TABLE t1(a INT, b TEXT, c TEXT, FULLTEXT INDEX(b)) ENGINE=InnoDB;
+connect con1,localhost,root,,test;
+SET DEBUG_SYNC='innodb_inplace_alter_table_enter SIGNAL s1 WAIT_FOR g1';
+SET DEBUG_SYNC='innodb_commit_inplace_alter_table_enter SIGNAL s2 WAIT_FOR g2';
+ALTER TABLE t1 ADD FULLTEXT(c);
+connection default;
+SET DEBUG_SYNC='now WAIT_FOR s1';
+KILL QUERY @id;
+SET DEBUG_SYNC='now SIGNAL g1 WAIT_FOR s2';
+START TRANSACTION;
+SELECT * FROM t1;
+a b c
+SET DEBUG_SYNC='now SIGNAL s2';
+connection con1;
+ERROR 70100: Query execution was interrupted
+disconnect con1;
+connection default;
+SET DEBUG_SYNC=RESET;
+ALTER TABLE t1 ADD bl INT AS (LENGTH(b)) VIRTUAL;
+CHECK TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 check status OK
+DROP TABLE t1;
diff --git a/mysql-test/suite/innodb_fts/t/create.test b/mysql-test/suite/innodb_fts/t/create.test
index 4e522994fcc..38c93de4982 100644
--- a/mysql-test/suite/innodb_fts/t/create.test
+++ b/mysql-test/suite/innodb_fts/t/create.test
@@ -106,3 +106,14 @@ SET GLOBAL innodb_optimize_fulltext_only= 1;
OPTIMIZE TABLE t1;
DROP TABLE t1;
SET GLOBAL innodb_optimize_fulltext_only= @optimize_fulltext.save;
+
+--echo #
+--echo # MDEV-24403 Segfault on CREATE TABLE with explicit FTS_DOC_ID_INDEX by multiple fields
+--echo #
+--error ER_WRONG_NAME_FOR_INDEX
+create table t1 (
+ f1 int, f2 text,
+ FTS_DOC_ID bigint unsigned not null,
+ unique key FTS_DOC_ID_INDEX(FTS_DOC_ID, f1),
+ fulltext (f2))
+engine=innodb;
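For context on the rejected definition above: InnoDB reserves the names FTS_DOC_ID and FTS_DOC_ID_INDEX for its full-text machinery. When supplied explicitly, FTS_DOC_ID must be declared as BIGINT UNSIGNED NOT NULL and FTS_DOC_ID_INDEX must be a unique index on that single column, so the multi-column FTS_DOC_ID_INDEX of the MDEV-24403 case is now rejected with ER_WRONG_NAME_FOR_INDEX instead of crashing. A sketch of the accepted form (illustrative, not part of the patch):

CREATE TABLE ft_ok (
  FTS_DOC_ID BIGINT UNSIGNED NOT NULL,       -- reserved name, exact type required
  f2 TEXT,
  UNIQUE KEY FTS_DOC_ID_INDEX (FTS_DOC_ID),  -- unique index on FTS_DOC_ID alone
  FULLTEXT KEY (f2)
) ENGINE=InnoDB;
DROP TABLE ft_ok;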
diff --git a/mysql-test/suite/innodb_fts/t/fulltext.test b/mysql-test/suite/innodb_fts/t/fulltext.test
index 663b202265b..f3bcaaec5cc 100644
--- a/mysql-test/suite/innodb_fts/t/fulltext.test
+++ b/mysql-test/suite/innodb_fts/t/fulltext.test
@@ -717,3 +717,28 @@ while ($N)
}
DROP TABLE t;
+
+--echo #
+--echo # MDEV-25295 Aborted FTS_DOC_ID_INDEX considered as
+--echo # existing FTS_DOC_ID_INDEX during DDL
+--echo #
+SET sql_mode='';
+CREATE TABLE t1 (FTS_DOC_ID BIGINT UNSIGNED NOT NULL,title CHAR(1),body TEXT)engine=innodb;
+INSERT INTO t1 (FTS_DOC_ID,title,body)VALUES(1,0,0), (1,0,0);
+--error ER_DUP_ENTRY
+CREATE FULLTEXT INDEX idx1 ON t1 (title,body);
+--error ER_DUP_ENTRY
+CREATE FULLTEXT INDEX idx1 ON t1 (title,body);
+DROP TABLE t1;
+SET sql_mode = DEFAULT;
+
+--echo #
+--echo # MDEV-25070 SIGSEGV in fts_create_in_mem_aux_table
+--echo #
+CREATE TABLE t1 (a CHAR, FULLTEXT KEY(a)) ENGINE=InnoDB;
+ALTER TABLE t1 DISCARD TABLESPACE;
+ALTER TABLE t1 ADD FULLTEXT INDEX (a);
+SHOW CREATE TABLE t1;
+DROP TABLE t1;
+
+--echo # End of 10.3 tests
diff --git a/mysql-test/suite/innodb_fts/t/innodb-fts-ddl.test b/mysql-test/suite/innodb_fts/t/innodb-fts-ddl.test
index cca110f3550..7c56811a2d9 100644
--- a/mysql-test/suite/innodb_fts/t/innodb-fts-ddl.test
+++ b/mysql-test/suite/innodb_fts/t/innodb-fts-ddl.test
@@ -278,6 +278,47 @@ CREATE TABLE t1 (FTS_DOC_ID BIGINT UNSIGNED PRIMARY KEY,
ALTER TABLE t1 DROP index fidx, ADD FULLTEXT INDEX(f1);
DROP TABLE t1;
+--echo #
+--echo # MDEV-21478 Inplace alter fails to report error when
+--echo # FTS_DOC_ID is added
+
+SET NAMES utf8;
+
+CREATE TABLE t1(f1 INT NOT NULL)ENGINE=InnoDB;
+ALTER TABLE t1 ADD FTS_DOC_ıD BIGINT UNSIGNED NOT NULL, ALGORITHM=COPY;
+ALTER TABLE t1 DROP COLUMN FTS_DOC_ıD;
+ALTER TABLE t1 ADD FTS_DOC_ıD BIGINT UNSIGNED NOT NULL, ALGORITHM=INPLACE;
+DROP TABLE t1;
+
+CREATE TABLE t1 (f1 INT NOT NULL)ENGINE=InnoDB;
+
+--error ER_WRONG_COLUMN_NAME
+ALTER TABLE t1 ADD FTS_DOC_İD BIGINT UNSIGNED NOT NULL, ALGORITHM=INPLACE;
+
+--error ER_WRONG_COLUMN_NAME
+ALTER TABLE t1 ADD FTS_DOC_İD BIGINT UNSIGNED NOT NULL, ALGORITHM=COPY;
+
+--error ER_WRONG_COLUMN_NAME
+ALTER TABLE t1 ADD fts_doc_id INT, ALGORITHM=COPY;
+
+--error ER_WRONG_COLUMN_NAME
+ALTER TABLE t1 ADD fts_doc_id INT, ALGORITHM=INPLACE;
+
+--error ER_WRONG_COLUMN_NAME
+ALTER TABLE t1 ADD fts_doc_id BIGINT UNSIGNED NOT NULL, ALGORITHM=COPY;
+
+--error ER_WRONG_COLUMN_NAME
+ALTER TABLE t1 ADD fts_doc_id BIGINT UNSIGNED NOT NULL, ALGORITHM=INPLACE;
+
+--error ER_WRONG_COLUMN_NAME
+ALTER TABLE t1 ADD FTS_DOC_ID INT UNSIGNED NOT NULL, ALGORITHM=COPY;
+
+--error ER_WRONG_COLUMN_NAME
+ALTER TABLE t1 ADD FTS_DOC_ID INT UNSIGNED NOT NULL, ALGORITHM=INPLACE;
+
+SHOW CREATE TABLE t1;
+DROP TABLE t1;
+
# Add more than one FTS index
CREATE TABLE t1 (a VARCHAR(3)) ENGINE=InnoDB;
ALTER TABLE t1 ADD FULLTEXT KEY(a), ADD COLUMN b VARCHAR(3), ADD FULLTEXT KEY(b);
@@ -294,3 +335,5 @@ CREATE TABLE t1
ENGINE=InnoDB;
ALTER TABLE t1 ADD c SERIAL;
DROP TABLE t1;
+
+--echo # End of 10.3 tests
diff --git a/mysql-test/suite/innodb_fts/t/misc_debug.test b/mysql-test/suite/innodb_fts/t/misc_debug.test
index aaf628abe6d..461e3f1d9d4 100644
--- a/mysql-test/suite/innodb_fts/t/misc_debug.test
+++ b/mysql-test/suite/innodb_fts/t/misc_debug.test
@@ -5,6 +5,8 @@
--source include/have_innodb.inc
--source include/have_debug.inc
+--source include/have_debug_sync.inc
+--source include/count_sessions.inc
# Following test is for Bug 14668777 - ASSERT ON IB_VECTOR_SIZE(
# TABLE->FTS->INDEXES, ALTER TABLE
@@ -52,3 +54,32 @@ INSERT INTO t1 VALUES(1, "mariadb");
ALTER TABLE t1 FORCE;
# Cleanup
DROP TABLE t2, t1;
+
+--echo #
+--echo # MDEV-25200 Index count mismatch due to aborted FULLTEXT INDEX
+--echo #
+CREATE TABLE t1(a INT, b TEXT, c TEXT, FULLTEXT INDEX(b)) ENGINE=InnoDB;
+connect(con1,localhost,root,,test);
+let $ID= `SELECT @id := CONNECTION_ID()`;
+SET DEBUG_SYNC='innodb_inplace_alter_table_enter SIGNAL s1 WAIT_FOR g1';
+SET DEBUG_SYNC='innodb_commit_inplace_alter_table_enter SIGNAL s2 WAIT_FOR g2';
+send ALTER TABLE t1 ADD FULLTEXT(c);
+connection default;
+SET DEBUG_SYNC='now WAIT_FOR s1';
+let $ignore= `SELECT @id := $ID`;
+KILL QUERY @id;
+SET DEBUG_SYNC='now SIGNAL g1 WAIT_FOR s2';
+START TRANSACTION;
+SELECT * FROM t1;
+SET DEBUG_SYNC='now SIGNAL s2';
+connection con1;
+--error ER_QUERY_INTERRUPTED
+reap;
+disconnect con1;
+connection default;
+SET DEBUG_SYNC=RESET;
+# Exploit MDEV-17468 to force the table definition to be reloaded
+ALTER TABLE t1 ADD bl INT AS (LENGTH(b)) VIRTUAL;
+CHECK TABLE t1;
+DROP TABLE t1;
+--source include/wait_until_count_sessions.inc
diff --git a/mysql-test/suite/innodb_gis/t/1.test b/mysql-test/suite/innodb_gis/t/1.test
index 7c9199c59ae..f08fdaddbda 100644
--- a/mysql-test/suite/innodb_gis/t/1.test
+++ b/mysql-test/suite/innodb_gis/t/1.test
@@ -393,7 +393,6 @@ insert into t1 values (1);
insert into t1 values (1.11);
--error 1416
insert into t1 values ("qwerty");
-# --error ER_GIS_INVALID_DATA
--error ER_BAD_NULL_ERROR
insert into t1 values (ST_pointfromtext('point(1,1)'));
@@ -437,7 +436,6 @@ select
ST_y(b) IS NULL
from t1;
-# --error ER_GIS_INVALID_DATA
select
MBRwithin(b, b) IS NULL, MBRcontains(b, b) IS NULL, MBRoverlaps(b, b) IS NULL,
MBRequals(b, b) IS NULL, MBRdisjoint(b, b) IS NULL, ST_touches(b, b) IS NULL,
@@ -466,7 +464,6 @@ DROP TABLE t1;
#
CREATE TABLE `t1` ( `col9` set('a'), `col89` date);
INSERT IGNORE INTO `t1` VALUES ('','0000-00-00');
-# --error ER_GIS_INVALID_DATA
select ST_geomfromtext(col9,col89) as a from t1;
DROP TABLE t1;
@@ -623,17 +620,11 @@ SELECT GROUP_CONCAT(a2.name ORDER BY a2.name) AS MBRwithin FROM t1 a1 JOIN
# MBROverlaps needs a few more tests, with point and line dimensions
-# --error ER_GIS_INVALID_DATA
SET @vert1 = ST_GeomFromText('POLYGON ((0 -2, 0 2, 0 -2))');
-# --error ER_GIS_INVALID_DATA
SET @horiz1 = ST_GeomFromText('POLYGON ((-2 0, 2 0, -2 0))');
-# --error ER_GIS_INVALID_DATA
SET @horiz2 = ST_GeomFromText('POLYGON ((-1 0, 3 0, -1 0))');
-# --error ER_GIS_INVALID_DATA
SET @horiz3 = ST_GeomFromText('POLYGON ((2 0, 3 0, 2 0))');
-# --error ER_GIS_INVALID_DATA
SET @point1 = ST_GeomFromText('POLYGON ((0 0))');
-# --error ER_GIS_INVALID_DATA
SET @point2 = ST_GeomFromText('POLYGON ((-2 0))');
SELECT GROUP_CONCAT(a1.name ORDER BY a1.name) AS MBRoverlaps FROM t1 a1 WHERE MBROverlaps(a1.square, @vert1) GROUP BY a1.name;
@@ -773,10 +764,8 @@ SELECT 1 FROM (SELECT GREATEST(1,GEOMETRYCOLLECTION('00000','00000')) b FROM DUA
--echo # BUG#51875: crash when loading data into geometry function ST_polyfromwkb
--echo #
SET @a=0x00000000030000000100000000000000000000000000144000000000000014400000000000001840000000000000184000000000000014400000000000001440;
-# --error ER_GIS_INVALID_DATA
SET @a=ST_POLYFROMWKB(@a);
SET @a=0x00000000030000000000000000000000000000000000144000000000000014400000000000001840000000000000184000000000000014400000000000001440;
-# --error ER_GIS_INVALID_DATA
SET @a=ST_POLYFROMWKB(@a);
@@ -901,7 +890,6 @@ DROP TABLE g1;
CREATE TABLE g1(a TEXT NOT NULL, KEY(a(255)));
INSERT INTO g1 VALUES ('a'),('a');
-# --error ER_GIS_INVALID_DATA
SELECT 1 FROM g1 WHERE a >= ANY
(SELECT 1 FROM g1 WHERE a = ST_geomfromtext('') OR a) ;
diff --git a/mysql-test/suite/innodb_gis/t/bug16236208.test b/mysql-test/suite/innodb_gis/t/bug16236208.test
index b55ab1d0fd3..3a1fbefc52c 100644
--- a/mysql-test/suite/innodb_gis/t/bug16236208.test
+++ b/mysql-test/suite/innodb_gis/t/bug16236208.test
@@ -47,7 +47,6 @@ ST_GeomFromText('GEOMETRYCOLLECTION(POINT(0 0), LINESTRING(0 0,10 10))'));
CREATE INDEX linestring_index ON linestring(linestring_nokey(5));
ALTER TABLE linestring ADD KEY (linestring_key(5));
-# --error ER_GIS_INVALID_DATA
SELECT ST_AsText(linestring_nokey) FROM linestring FORCE KEY (
linestring_key ) WHERE ST_CONTAINS( ST_GeomFromText('POLYGON( ( 3923 2815 , 4246
2122 , 4028 2971 , 4017 3019 , 3923 2815 ) )') , linestring_key ) AND
diff --git a/mysql-test/suite/innodb_gis/t/create_spatial_index.test b/mysql-test/suite/innodb_gis/t/create_spatial_index.test
index 7ddece9ad86..ef87a51d372 100644
--- a/mysql-test/suite/innodb_gis/t/create_spatial_index.test
+++ b/mysql-test/suite/innodb_gis/t/create_spatial_index.test
@@ -1143,7 +1143,6 @@ insert into `t1` values(
linestring(point(1,1),point(1,1))
);
-# --error ER_GIS_INVALID_DATA
--error ER_BAD_NULL_ERROR
insert into `t1` values
(
diff --git a/mysql-test/suite/innodb_gis/t/gis.test b/mysql-test/suite/innodb_gis/t/gis.test
index 44aec76770a..629bb94b8c5 100644
--- a/mysql-test/suite/innodb_gis/t/gis.test
+++ b/mysql-test/suite/innodb_gis/t/gis.test
@@ -1,4 +1,4 @@
-# This is a testcase copied from mysql-test/t/gis.test
+# This is a testcase copied from mysql-test/main/gis.test
--source include/have_innodb.inc
-- source include/have_geometry.inc
@@ -388,7 +388,6 @@ insert into t1 values (1.11);
--error 1416
insert into t1 values ("qwerty");
--error 1048
-# --error ER_GIS_INVALID_DATA
insert into t1 values (ST_pointfromtext('point(1,1)'));
drop table t1;
@@ -431,7 +430,6 @@ select
ST_y(b) IS NULL
from t1;
-# --error ER_GIS_INVALID_DATA
select
MBRwithin(b, b) IS NULL, MBRcontains(b, b) IS NULL, MBRoverlaps(b, b) IS NULL,
MBRequals(b, b) IS NULL, MBRdisjoint(b, b) IS NULL, ST_touches(b, b) IS NULL,
@@ -460,7 +458,6 @@ DROP TABLE t1;
#
CREATE TABLE `t1` ( `col9` set('a'), `col89` date);
INSERT IGNORE INTO `t1` VALUES ('','0000-00-00');
-# --error ER_GIS_INVALID_DATA
select ST_geomfromtext(col9,col89) as a from t1;
DROP TABLE t1;
@@ -617,17 +614,11 @@ SELECT GROUP_CONCAT(a2.name ORDER BY a2.name) AS MBRwithin FROM t1 a1 JOIN
# MBROverlaps needs a few more tests, with point and line dimensions
-# --error ER_GIS_INVALID_DATA
SET @vert1 = ST_GeomFromText('POLYGON ((0 -2, 0 2, 0 -2))');
-# --error ER_GIS_INVALID_DATA
SET @horiz1 = ST_GeomFromText('POLYGON ((-2 0, 2 0, -2 0))');
-# --error ER_GIS_INVALID_DATA
SET @horiz2 = ST_GeomFromText('POLYGON ((-1 0, 3 0, -1 0))');
-# --error ER_GIS_INVALID_DATA
SET @horiz3 = ST_GeomFromText('POLYGON ((2 0, 3 0, 2 0))');
-# --error ER_GIS_INVALID_DATA
SET @point1 = ST_GeomFromText('POLYGON ((0 0))');
-# --error ER_GIS_INVALID_DATA
SET @point2 = ST_GeomFromText('POLYGON ((-2 0))');
SELECT GROUP_CONCAT(a1.name ORDER BY a1.name) AS MBRoverlaps FROM t1 a1 WHERE MBROverlaps(a1.square, @vert1) GROUP BY a1.name;
@@ -767,10 +758,8 @@ SELECT 1 FROM (SELECT GREATEST(1,GEOMETRYCOLLECTION('00000','00000')) b FROM DUA
--echo # BUG#51875: crash when loading data into geometry function ST_polyfromwkb
--echo #
SET @a=0x00000000030000000100000000000000000000000000144000000000000014400000000000001840000000000000184000000000000014400000000000001440;
-# --error ER_GIS_INVALID_DATA
SET @a=ST_POLYFROMWKB(@a);
SET @a=0x00000000030000000000000000000000000000000000144000000000000014400000000000001840000000000000184000000000000014400000000000001440;
-# --error ER_GIS_INVALID_DATA
SET @a=ST_POLYFROMWKB(@a);
@@ -903,7 +892,6 @@ DROP TABLE g1;
CREATE TABLE g1(a TEXT NOT NULL, KEY(a(255)));
INSERT INTO g1 VALUES ('a'),('a');
-# --error ER_GIS_INVALID_DATA
SELECT 1 FROM g1 WHERE a >= ANY
(SELECT 1 FROM g1 WHERE a = ST_geomfromtext('') OR a) ;
@@ -1442,6 +1430,5 @@ DROP DATABASE gis_ogs;
--echo # Bug#13362660 ASSERTION `FIELD_POS < FIELD_COUNT' FAILED. IN PROTOCOL_TEXT::STORE
--echo #
-# --error ER_GIS_INVALID_DATA
--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION
SELECT ST_Union('', ''), md5(1);
diff --git a/mysql-test/suite/innodb_gis/t/precise.test b/mysql-test/suite/innodb_gis/t/precise.test
index 08b7e348362..3ddfc9369a4 100644
--- a/mysql-test/suite/innodb_gis/t/precise.test
+++ b/mysql-test/suite/innodb_gis/t/precise.test
@@ -122,7 +122,6 @@ SELECT ST_Equals(ST_PointFromText('POINT (12 13)'),ST_PointFromText('POINT (12 1
--echo # BUG#11759650/51979: UNION/INTERSECTION OF POLYGONS CRASHES MYSQL
--echo #
-# --error ER_GIS_INVALID_DATA
SELECT ST_ASTEXT(ST_UNION(ST_GEOMFROMTEXT('POLYGON((525000 183300,525400
183300,525400 18370, 525000 183700,525000 183300))'),
ST_geomfromtext('POLYGON((525298.67 183511.53,525296.57
diff --git a/mysql-test/suite/innodb_gis/t/rtree.test b/mysql-test/suite/innodb_gis/t/rtree.test
index 58d81576b3e..98931e70e62 100644
--- a/mysql-test/suite/innodb_gis/t/rtree.test
+++ b/mysql-test/suite/innodb_gis/t/rtree.test
@@ -78,17 +78,11 @@ SELECT name, ST_AsText(square) from t1 where MBRWithin(@p, square);
# MBROverlaps needs a few more tests, with point and line dimensions
-# --error ER_GIS_INVALID_DATA
SET @vert1 = ST_GeomFromText('POLYGON ((0 -2, 0 2, 0 -2))');
-# --error ER_GIS_INVALID_DATA
SET @horiz1 = ST_GeomFromText('POLYGON ((-2 0, 2 0, -2 0))');
-# --error ER_GIS_INVALID_DATA
SET @horiz2 = ST_GeomFromText('POLYGON ((-1 0, 3 0, -1 0))');
-# --error ER_GIS_INVALID_DATA
SET @horiz3 = ST_GeomFromText('POLYGON ((2 0, 3 0, 2 0))');
-# --error ER_GIS_INVALID_DATA
SET @point1 = ST_GeomFromText('POLYGON ((0 0))');
-# --error ER_GIS_INVALID_DATA
SET @point2 = ST_GeomFromText('POLYGON ((-2 0))');
SELECT GROUP_CONCAT(a1.name ORDER BY a1.name) AS MBRoverlaps FROM t1 a1 WHERE MBROverlaps(a1.square, @vert1) GROUP BY a1.name;
diff --git a/mysql-test/suite/innodb_gis/t/rtree_purge.test b/mysql-test/suite/innodb_gis/t/rtree_purge.test
index 42f00428b88..60ecbe2e53a 100644
--- a/mysql-test/suite/innodb_gis/t/rtree_purge.test
+++ b/mysql-test/suite/innodb_gis/t/rtree_purge.test
@@ -1,7 +1,7 @@
# This test case will test R-tree purge.
--source include/innodb_page_size.inc
-# Valgrind takes too much time on PB2 even in the --big-test runs.
+--source include/have_sequence.inc
--source include/not_valgrind.inc
SET @saved_frequency = @@GLOBAL.innodb_purge_rseg_truncate_frequency;
@@ -16,9 +16,7 @@ set @p=point(1,1);
let $n=200;
while ($n) {
begin;
-insert into t values(@p,@p),(@p,@p);
-insert into t select @p,@p
-from t a,t b,t c,t d,t e,t f,t g;
+insert into t select @p,@p from seq_1_to_130;
delete from t;
commit;
dec $n;
diff --git a/mysql-test/suite/innodb_zip/r/index_large_prefix.result b/mysql-test/suite/innodb_zip/r/index_large_prefix.result
index 4866c152640..986453851eb 100644
--- a/mysql-test/suite/innodb_zip/r/index_large_prefix.result
+++ b/mysql-test/suite/innodb_zip/r/index_large_prefix.result
@@ -325,10 +325,10 @@ ROW_FORMAT=DYNAMIC;
SET sql_mode='';
create index idx1 on worklog5743(a2);
Warnings:
-Warning 1071 Specified key was too long; max key length is 3072 bytes
+Note 1071 Specified key was too long; max key length is 3072 bytes
create index idx2 on worklog5743(a3);
Warnings:
-Warning 1071 Specified key was too long; max key length is 3072 bytes
+Note 1071 Specified key was too long; max key length is 3072 bytes
create index idx3 on worklog5743(a4);
show warnings;
Level Code Message
@@ -337,7 +337,7 @@ create index idx4 on worklog5743(a1, a2);
ERROR 42000: Specified key was too long; max key length is 3072 bytes
show warnings;
Level Code Message
-Warning 1071 Specified key was too long; max key length is 3072 bytes
+Note 1071 Specified key was too long; max key length is 3072 bytes
Error 1071 Specified key was too long; max key length is 3072 bytes
create index idx5 on worklog5743(a1, a5);
ERROR 42000: Specified key was too long; max key length is 3072 bytes
diff --git a/mysql-test/suite/innodb_zip/r/prefix_index_liftedlimit.result b/mysql-test/suite/innodb_zip/r/prefix_index_liftedlimit.result
index 2c66133404c..e88b72ef1d4 100644
--- a/mysql-test/suite/innodb_zip/r/prefix_index_liftedlimit.result
+++ b/mysql-test/suite/innodb_zip/r/prefix_index_liftedlimit.result
@@ -1224,7 +1224,7 @@ DROP INDEX prefix_idx ON worklog5743;
SET sql_mode = 'NO_ENGINE_SUBSTITUTION';
CREATE INDEX prefix_idx ON worklog5743(col_1_varbinary (4000));
Warnings:
-Warning 1071 Specified key was too long; max key length is 3072 bytes
+Note 1071 Specified key was too long; max key length is 3072 bytes
SET sql_mode = default;
INSERT INTO worklog5743 VALUES(REPEAT("a", 4000),REPEAT("o", 4000));
SELECT col_1_varbinary = REPEAT("a", 4000) FROM worklog5743;
diff --git a/mysql-test/suite/maria/icp.result b/mysql-test/suite/maria/icp.result
index 96793beae8a..975c280d467 100644
--- a/mysql-test/suite/maria/icp.result
+++ b/mysql-test/suite/maria/icp.result
@@ -450,11 +450,11 @@ c1 INT NOT NULL,
PRIMARY KEY (pk)
);
INSERT INTO t1 VALUES (1,9),(2,7),(3,6),(4,3),(5,1);
-EXPLAIN SELECT pk, c1 FROM t1 WHERE pk <> 3;
+EXPLAIN SELECT pk, c1 FROM t1 WHERE (pk<3 or pk>3);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL PRIMARY NULL NULL NULL 5 Using where
SET SESSION optimizer_switch='index_condition_pushdown=off';
-SELECT pk, c1 FROM t1 WHERE pk <> 3;
+SELECT pk, c1 FROM t1 WHERE (pk<3 or pk>3);
pk c1
1 9
2 7
@@ -682,23 +682,23 @@ INSERT INTO t2 VALUES
('Ill'), ('eckqzsflbzaffti'), ('w'), ('she'), ('gxbwypqtjzwywwer'), ('w');
SET SESSION optimizer_switch='index_condition_pushdown=off';
EXPLAIN
-SELECT t1.b, t1.c FROM t1, t2 WHERE t1.a = t2.a AND t1.b != 0
+SELECT t1.b, t1.c FROM t1, t2 WHERE t1.a = t2.a AND (t1.b<0 OR t1.b>0)
HAVING t1.c != 5 ORDER BY t1.c;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 system PRIMARY NULL NULL NULL 1
1 SIMPLE t2 ref a a 515 const 1 Using where
-SELECT t1.b, t1.c FROM t1, t2 WHERE t1.a = t2.a AND t1.b != 0
+SELECT t1.b, t1.c FROM t1, t2 WHERE t1.a = t2.a AND (t1.b<0 OR t1.b>0)
HAVING t1.c != 5 ORDER BY t1.c;
b c
1 4
SET SESSION optimizer_switch='index_condition_pushdown=on';
EXPLAIN
-SELECT t1.b, t1.c FROM t1, t2 WHERE t1.a = t2.a AND t1.b != 0
+SELECT t1.b, t1.c FROM t1, t2 WHERE t1.a = t2.a AND (t1.b<0 OR t1.b>0)
HAVING t1.c != 5 ORDER BY t1.c;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 system PRIMARY NULL NULL NULL 1
1 SIMPLE t2 ref a a 515 const 1 Using where
-SELECT t1.b, t1.c FROM t1, t2 WHERE t1.a = t2.a AND t1.b != 0
+SELECT t1.b, t1.c FROM t1, t2 WHERE t1.a = t2.a AND (t1.b<0 OR t1.b>0)
HAVING t1.c != 5 ORDER BY t1.c;
b c
1 4
diff --git a/mysql-test/suite/maria/maria-ucs2.result b/mysql-test/suite/maria/maria-ucs2.result
index 1a54ab78081..321a374f4c2 100644
--- a/mysql-test/suite/maria/maria-ucs2.result
+++ b/mysql-test/suite/maria/maria-ucs2.result
@@ -17,7 +17,7 @@ test.t1 check status OK
SET STATEMENT sql_mode = 'NO_ENGINE_SUBSTITUTION' FOR
ALTER TABLE t1 MODIFY a VARCHAR(800) CHARSET `ucs2`;
Warnings:
-Warning 1071 Specified key was too long; max key length is 1000 bytes
+Note 1071 Specified key was too long; max key length is 1000 bytes
CHECK TABLE t1;
Table Op Msg_type Msg_text
test.t1 check status OK
@@ -30,7 +30,7 @@ t1 CREATE TABLE `t1` (
DROP TABLE t1;
CREATE TABLE t1 (a VARCHAR(800),KEY(a)) ENGINE=Aria CHARACTER SET ucs2;
Warnings:
-Warning 1071 Specified key was too long; max key length is 1000 bytes
+Note 1071 Specified key was too long; max key length is 1000 bytes
INSERT INTO t1 VALUES (REPEAT('abc ',200));
CHECK TABLE t1;
Table Op Msg_type Msg_text
diff --git a/mysql-test/suite/maria/maria.result b/mysql-test/suite/maria/maria.result
index 913bf4efbdf..03d37270eed 100644
--- a/mysql-test/suite/maria/maria.result
+++ b/mysql-test/suite/maria/maria.result
@@ -1594,7 +1594,7 @@ a b
drop table t1;
create table t1 (v varchar(65530), key(v));
Warnings:
-Warning 1071 Specified key was too long; max key length is 1000 bytes
+Note 1071 Specified key was too long; max key length is 1000 bytes
drop table if exists t1;
set statement sql_mode = 'NO_ENGINE_SUBSTITUTION' for
create table t1 (v varchar(65536));
@@ -1866,7 +1866,7 @@ t1 CREATE TABLE `t1` (
drop table t1;
create table t1 (a varchar(2048), key `a` (a));
Warnings:
-Warning 1071 Specified key was too long; max key length is 1000 bytes
+Note 1071 Specified key was too long; max key length is 1000 bytes
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
@@ -1876,7 +1876,7 @@ t1 CREATE TABLE `t1` (
drop table t1;
create table t1 (a varchar(2048), key `a` (a) key_block_size=1024);
Warnings:
-Warning 1071 Specified key was too long; max key length is 1000 bytes
+Note 1071 Specified key was too long; max key length is 1000 bytes
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
@@ -1886,7 +1886,7 @@ t1 CREATE TABLE `t1` (
drop table t1;
create table t1 (a int not null, b varchar(2048), key (a), key(b)) key_block_size=1024;
Warnings:
-Warning 1071 Specified key was too long; max key length is 1000 bytes
+Note 1071 Specified key was too long; max key length is 1000 bytes
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
@@ -1953,7 +1953,7 @@ t1 CREATE TABLE `t1` (
drop table t1;
create table t1 (a int not null, b varchar(2048), key (a), key(b)) key_block_size=8192;
Warnings:
-Warning 1071 Specified key was too long; max key length is 1000 bytes
+Note 1071 Specified key was too long; max key length is 1000 bytes
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
@@ -1965,7 +1965,7 @@ t1 CREATE TABLE `t1` (
drop table t1;
create table t1 (a int not null, b varchar(2048), key (a) key_block_size=1024, key(b)) key_block_size=8192;
Warnings:
-Warning 1071 Specified key was too long; max key length is 1000 bytes
+Note 1071 Specified key was too long; max key length is 1000 bytes
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
@@ -1995,7 +1995,7 @@ t1 CREATE TABLE `t1` (
drop table t1;
create table t1 (a varchar(2048), key `a` (a) key_block_size=1000000000000000000);
Warnings:
-Warning 1071 Specified key was too long; max key length is 1000 bytes
+Note 1071 Specified key was too long; max key length is 1000 bytes
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
diff --git a/mysql-test/suite/maria/maria3.result b/mysql-test/suite/maria/maria3.result
index 64ae268997a..09fc696c272 100644
--- a/mysql-test/suite/maria/maria3.result
+++ b/mysql-test/suite/maria/maria3.result
@@ -17,7 +17,7 @@ t1 CREATE TABLE `t1` (
drop table t1;
create table t1 (a varchar(2048), key `a` (a) key_block_size=1000000000000000000);
Warnings:
-Warning 1071 Specified key was too long; max key length is 1000 bytes
+Note 1071 Specified key was too long; max key length is 1000 bytes
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
diff --git a/mysql-test/suite/maria/mrr.result b/mysql-test/suite/maria/mrr.result
index 5c709fb34e5..17b7b751dfb 100644
--- a/mysql-test/suite/maria/mrr.result
+++ b/mysql-test/suite/maria/mrr.result
@@ -393,7 +393,7 @@ PRIMARY KEY (pk),
KEY col_varchar_1024_latin1_key (col_varchar_1024_latin1_key)
) ENGINE=Aria;
Warnings:
-Warning 1071 Specified key was too long; max key length is 1000 bytes
+Note 1071 Specified key was too long; max key length is 1000 bytes
INSERT INTO t1 VALUES
(1,'z'), (2,'abcdefjhjkl'), (3,'in'), (4,'abcdefjhjkl'), (6,'abcdefjhjkl'),
(11,'zx'), (12,'abcdefjhjm'), (13,'jn'), (14,'abcdefjhjp'), (16,'abcdefjhjr');
@@ -430,7 +430,7 @@ f5 varchar(1024) COLLATE latin1_bin,
KEY (f5)
) ENGINE=Aria TRANSACTIONAL=0 ;
Warnings:
-Warning 1071 Specified key was too long; max key length is 1000 bytes
+Note 1071 Specified key was too long; max key length is 1000 bytes
# Fill the table with some data
SELECT alias2.* , alias1.f2
FROM
diff --git a/mysql-test/suite/mariabackup/error_during_copyback.result b/mysql-test/suite/mariabackup/error_during_copyback.result
new file mode 100644
index 00000000000..ba27f0f83e6
--- /dev/null
+++ b/mysql-test/suite/mariabackup/error_during_copyback.result
@@ -0,0 +1,10 @@
+CREATE TABLE t(i INT) ENGINE INNODB;
+INSERT INTO t VALUES(1);
+# xtrabackup backup
+# xtrabackup prepare
+# restart server
+# restart
+SELECT * FROM t;
+i
+1
+DROP TABLE t;
diff --git a/mysql-test/suite/mariabackup/error_during_copyback.test b/mysql-test/suite/mariabackup/error_during_copyback.test
new file mode 100644
index 00000000000..3ec9fbfc3c3
--- /dev/null
+++ b/mysql-test/suite/mariabackup/error_during_copyback.test
@@ -0,0 +1,25 @@
+--source include/have_debug.inc
+CREATE TABLE t(i INT) ENGINE INNODB;
+INSERT INTO t VALUES(1);
+echo # xtrabackup backup;
+let $targetdir=$MYSQLTEST_VARDIR/tmp/backup;
+--disable_result_log
+exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$targetdir;
+--enable_result_log
+echo # xtrabackup prepare;
+--disable_result_log
+exec $XTRABACKUP --prepare --target-dir=$targetdir;
+let $_datadir= `SELECT @@datadir`;
+--source include/shutdown_mysqld.inc
+rmdir $_datadir;
+error 1;
+exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --copy-back --datadir=$_datadir --target-dir=$targetdir --dbug=+d,copy_file_error;
+list_files $_datadir;
+rmdir $_datadir;
+exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --copy-back --datadir=$_datadir --target-dir=$targetdir;
+echo # restart server;
+--source include/start_mysqld.inc
+SELECT * FROM t;
+DROP TABLE t;
+rmdir $targetdir;
+
diff --git a/mysql-test/suite/mariabackup/innodb_force_recovery.result b/mysql-test/suite/mariabackup/innodb_force_recovery.result
new file mode 100644
index 00000000000..6626bb0bc55
--- /dev/null
+++ b/mysql-test/suite/mariabackup/innodb_force_recovery.result
@@ -0,0 +1,26 @@
+CREATE TABLE t(i INT) ENGINE INNODB;
+INSERT INTO t VALUES(1);
+# "innodb_force_recovery=1" should be allowed with "--prepare" only (mariabackup)
+FOUND 1 /should only be used with "--prepare"/ in backup.log
+# "innodb_force_recovery=1" should be allowed with "--apply-log" only (innobackupex)
+FOUND 1 /should only be used with "--apply-log"/ in backup.log
+# "innodb_force_recovery" should be limited to "SRV_FORCE_IGNORE_CORRUPT" (mariabackup)
+FOUND 1 /innodb_force_recovery = 1/ in backup.log
+# "innodb_force_recovery" should be limited to "SRV_FORCE_IGNORE_CORRUPT" (innobackupex)
+FOUND 1 /innodb_force_recovery = 1/ in backup.log
+# "innodb_force_recovery" should be read from "backup-my.cnf" (mariabackup)
+FOUND 1 /innodb_force_recovery = 1/ in backup.log
+# "innodb_force_recovery=1" should be read from "backup-my.cnf" (innobackupex)
+FOUND 1 /innodb_force_recovery = 1/ in backup.log
+# "innodb_force_recovery" from the command line should override "backup-my.cnf" (mariabackup)
+NOT FOUND /innodb_force_recovery = 1/ in backup.log
+# "innodb_force_recovery" from the command line should override "backup-my.cnf" (innobackupex)
+NOT FOUND /innodb_force_recovery = 1/ in backup.log
+# shutdown server
+# remove datadir
+# xtrabackup move back
+# restart
+SELECT * FROM t;
+i
+1
+DROP TABLE t;
diff --git a/mysql-test/suite/mariabackup/innodb_force_recovery.test b/mysql-test/suite/mariabackup/innodb_force_recovery.test
new file mode 100644
index 00000000000..3a7b3c6106c
--- /dev/null
+++ b/mysql-test/suite/mariabackup/innodb_force_recovery.test
@@ -0,0 +1,138 @@
+# This test checks if "innodb_force_recovery" is only allowed with "--prepare"
+# (for mariabackup) and "--apply-log" (for innobackupex), and is limited to
+# "SRV_FORCE_IGNORE_CORRUPT" only.
+
+# Setup.
+--source include/have_innodb.inc
+
+--let targetdir=$MYSQLTEST_VARDIR/tmp/backup
+--let backuplog=$MYSQLTEST_VARDIR/tmp/backup.log
+
+CREATE TABLE t(i INT) ENGINE INNODB;
+INSERT INTO t VALUES(1);
+
+# Check for command line arguments.
+--echo # "innodb_force_recovery=1" should be allowed with "--prepare" only (mariabackup)
+--disable_result_log
+--error 1
+exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --innodb-force-recovery=1 --target-dir=$targetdir >$backuplog;
+--enable_result_log
+--let SEARCH_PATTERN=should only be used with "--prepare"
+--let SEARCH_FILE=$backuplog
+--source include/search_pattern_in_file.inc
+
+--echo # "innodb_force_recovery=1" should be allowed with "--apply-log" only (innobackupex)
+--disable_result_log
+--error 1
+exec $XTRABACKUP --innobackupex --defaults-file=$MYSQLTEST_VARDIR/my.cnf --no-timestamp --innodb-force-recovery=1 $targetdir >$backuplog;
+--enable_result_log
+--let SEARCH_PATTERN=should only be used with "--apply-log"
+--let SEARCH_FILE=$backuplog
+--source include/search_pattern_in_file.inc
+
+--disable_result_log
+exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$targetdir;
+--enable_result_log
+--echo # "innodb_force_recovery" should be limited to "SRV_FORCE_IGNORE_CORRUPT" (mariabackup)
+--disable_result_log
+exec $XTRABACKUP --prepare --innodb-force-recovery=2 --target-dir=$targetdir >$backuplog;
+--enable_result_log
+--let SEARCH_PATTERN=innodb_force_recovery = 1
+--let SEARCH_FILE=$backuplog
+--source include/search_pattern_in_file.inc
+rmdir $targetdir;
+
+--disable_result_log
+exec $XTRABACKUP --innobackupex --defaults-file=$MYSQLTEST_VARDIR/my.cnf --no-timestamp $targetdir;
+--enable_result_log
+--echo # "innodb_force_recovery" should be limited to "SRV_FORCE_IGNORE_CORRUPT" (innobackupex)
+--disable_result_log
+exec $XTRABACKUP --innobackupex --apply-log --innodb-force-recovery=2 $targetdir >$backuplog;
+--enable_result_log
+--let SEARCH_PATTERN=innodb_force_recovery = 1
+--let SEARCH_FILE=$backuplog
+--source include/search_pattern_in_file.inc
+rmdir $targetdir;
+
+# Check for default file ("backup-my.cnf").
+--disable_result_log
+exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$targetdir;
+--enable_result_log
+perl;
+my $cfg_path="$ENV{'targetdir'}/backup-my.cnf";
+open(my $fd, '>>', "$cfg_path");
+print $fd "innodb_force_recovery=1\n";
+close $fd;
+EOF
+--echo # "innodb_force_recovery" should be read from "backup-my.cnf" (mariabackup)
+--disable_result_log
+exec $XTRABACKUP --defaults-file=$targetdir/backup-my.cnf --prepare --export --target-dir=$targetdir >$backuplog;
+--enable_result_log
+--let SEARCH_PATTERN=innodb_force_recovery = 1
+--let SEARCH_FILE=$backuplog
+--source include/search_pattern_in_file.inc
+rmdir $targetdir;
+
+--disable_result_log
+exec $XTRABACKUP --innobackupex --defaults-file=$MYSQLTEST_VARDIR/my.cnf --no-timestamp $targetdir;
+--enable_result_log
+perl;
+my $cfg_path="$ENV{'targetdir'}/backup-my.cnf";
+open(my $fd, '>>', "$cfg_path");
+print $fd "innodb_force_recovery=2\n";
+close $fd;
+EOF
+--echo # "innodb_force_recovery=1" should be read from "backup-my.cnf" (innobackupex)
+--disable_result_log
+exec $XTRABACKUP --innobackupex --defaults-file=$targetdir/backup-my.cnf --apply-log --export $targetdir >$backuplog;
+--enable_result_log
+--let SEARCH_PATTERN=innodb_force_recovery = 1
+--let SEARCH_FILE=$backuplog
+--source include/search_pattern_in_file.inc
+rmdir $targetdir;
+
+# Check for command line argument precedence.
+--disable_result_log
+exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$targetdir;
+--enable_result_log
+perl;
+my $cfg_path="$ENV{'targetdir'}/backup-my.cnf";
+open(my $fd, '>>', "$cfg_path");
+print $fd "innodb_force_recovery=1\n";
+close $fd;
+EOF
+--echo # "innodb_force_recovery" from the command line should override "backup-my.cnf" (mariabackup)
+--disable_result_log
+exec $XTRABACKUP --defaults-file=$targetdir/backup-my.cnf --prepare --innodb-force-recovery=0 --target-dir=$targetdir >$backuplog;
+--enable_result_log
+--let SEARCH_PATTERN=innodb_force_recovery = 1
+--let SEARCH_FILE=$backuplog
+--source include/search_pattern_in_file.inc
+rmdir $targetdir;
+
+--disable_result_log
+exec $XTRABACKUP --innobackupex --defaults-file=$MYSQLTEST_VARDIR/my.cnf --no-timestamp $targetdir;
+--enable_result_log
+perl;
+my $cfg_path="$ENV{'targetdir'}/backup-my.cnf";
+open(my $fd, '>>', "$cfg_path");
+print $fd "innodb_force_recovery=2\n";
+close $fd;
+EOF
+--echo # "innodb_force_recovery" from the command line should override "backup-my.cnf" (innobackupex)
+--disable_result_log
+exec $XTRABACKUP --innobackupex --defaults-file=$targetdir/backup-my.cnf --apply-log --innodb-force-recovery=0 --export $targetdir >$backuplog;
+--enable_result_log
+--let SEARCH_PATTERN=innodb_force_recovery = 1
+--let SEARCH_FILE=$backuplog
+--source include/search_pattern_in_file.inc
+
+--source include/restart_and_restore.inc
+
+# Check for restore.
+SELECT * FROM t;
+
+# Clean-up.
+DROP TABLE t;
+--rmdir $targetdir
+--remove_file $backuplog
diff --git a/mysql-test/suite/mariabackup/log_page_corruption.result b/mysql-test/suite/mariabackup/log_page_corruption.result
index be29ea435b6..91db833622a 100644
--- a/mysql-test/suite/mariabackup/log_page_corruption.result
+++ b/mysql-test/suite/mariabackup/log_page_corruption.result
@@ -23,11 +23,12 @@ INSERT INTO t6_corrupted_to_drop VALUES (3), (4), (5), (6), (7), (8), (9);
INSERT INTO t7_corrupted_to_alter VALUES (3), (4), (5), (6), (7), (8), (9);
# Corrupt tables
# restart
-# Backup must fail due to page corruption
+# Backup must fail due to page corruption
FOUND 1 /Database page corruption detected.*/ in backup.log
# "innodb_corrupted_pages" file must not exist
-# Backup must fail, but "innodb_corrupted_pages" file must be created due to --log-innodb-page-corruption option
+# Backup must not fail, but "innodb_corrupted_pages" file must be created due to --log-innodb-page-corruption option
FOUND 1 /Database page corruption detected.*/ in backup.log
+FOUND 1 /completed OK!/ in backup.log
--- "innodb_corrupted_pages" file content: ---
test/t1_corrupted
6 8 9
@@ -44,7 +45,7 @@ INSERT INTO t1_inc_corrupted VALUES (3), (4), (5), (6), (7), (8), (9);
INSERT INTO t2_inc_corrupted VALUES (3), (4), (5), (6), (7), (8), (9);
INSERT INTO t3_inc VALUES (3), (4), (5), (6), (7), (8), (9);
# restart
-# Backup must fail, but "innodb_corrupted_pages" file must be created due to --log-innodb-page-corruption option
+# Backup must not fail, but "innodb_corrupted_pages" file must be created due to --log-innodb-page-corruption option
--- "innodb_corrupted_pages" file content: ---
test/t1_corrupted
6 8 9
diff --git a/mysql-test/suite/mariabackup/log_page_corruption.test b/mysql-test/suite/mariabackup/log_page_corruption.test
index e9419687288..0151afb96b4 100644
--- a/mysql-test/suite/mariabackup/log_page_corruption.test
+++ b/mysql-test/suite/mariabackup/log_page_corruption.test
@@ -59,7 +59,7 @@ EOF
--let corrupted_pages_file_filt = $MYSQLTEST_VARDIR/tmp/innodb_corrupted_pages_filt
--let perl_result_file=$MYSQLTEST_VARDIR/tmp/perl_result
---echo # Backup must fail due to page corruption
+--echo # Backup must fail due to page corruption
--disable_result_log
--error 1
exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$targetdir > $backuplog;
@@ -80,15 +80,19 @@ exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=
--let after_copy_test_t7_corrupted_to_alter=ALTER TABLE test.t7_corrupted_to_alter ADD COLUMN (d INT)
--let add_corrupted_page_for_test_t7_corrupted_to_alter=3
---echo # Backup must fail, but "innodb_corrupted_pages" file must be created due to --log-innodb-page-corruption option
+--echo # Backup must not fail, but "innodb_corrupted_pages" file must be created due to --log-innodb-page-corruption option
--disable_result_log
---error 1
--exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --log-innodb-page-corruption --target-dir=$targetdir --dbug=+d,mariabackup_events,mariabackup_inject_code > $backuplog
--enable_result_log
--let SEARCH_PATTERN=Database page corruption detected.*
--let SEARCH_FILE=$backuplog
--source include/search_pattern_in_file.inc
+
+--let SEARCH_PATTERN=completed OK!
+--let SEARCH_FILE=$backuplog
+--source include/search_pattern_in_file.inc
+
--echo --- "innodb_corrupted_pages" file content: ---
perl;
do "$ENV{MTR_SUITE_DIR}/include/corrupt-page.pl";
@@ -145,9 +149,8 @@ EOF
--let after_copy_test_t7_inc_corrupted_to_alter=ALTER TABLE test.t7_inc_corrupted_to_alter ADD COLUMN (d INT)
--let add_corrupted_page_for_test_t7_inc_corrupted_to_alter=3
---echo # Backup must fail, but "innodb_corrupted_pages" file must be created due to --log-innodb-page-corruption option
+--echo # Backup must not fail, but "innodb_corrupted_pages" file must be created due to --log-innodb-page-corruption option
--disable_result_log
---error 1
--exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --log-innodb-page-corruption --target-dir=$incdir --incremental-basedir=$targetdir --dbug=+d,mariabackup_events,mariabackup_inject_code > $backuplog
--disable_result_log
@@ -161,6 +164,9 @@ EOF
--let SEARCH_PATTERN=Database page corruption detected.*
--let SEARCH_FILE=$backuplog
--source include/search_pattern_in_file.inc
+--let SEARCH_PATTERN=completed OK!
+--source include/search_pattern_in_file.inc
+
--let corrupted_pages_file = $incdir/innodb_corrupted_pages
--echo --- "innodb_corrupted_pages" file content: ---
perl;
@@ -260,7 +266,6 @@ EOF
--echo # Full backup with --log-innodb-page-corruption
--disable_result_log
---error 1
--exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --log-innodb-page-corruption --target-dir=$targetdir
--enable_result_log
--let corrupted_pages_file = $targetdir/innodb_corrupted_pages
@@ -288,7 +293,6 @@ EOF
--echo # Incremental backup --log-innodb-page-corruption
--disable_result_log
---error 1
--exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --log-innodb-page-corruption --target-dir=$incdir --incremental-basedir=$targetdir --dbug=+d,mariabackup_events,mariabackup_inject_code > $backuplog
--disable_result_log
--let corrupted_pages_file = $incdir/innodb_corrupted_pages
diff --git a/mysql-test/suite/optimizer_unfixed_bugs/r/bug42991.result b/mysql-test/suite/optimizer_unfixed_bugs/r/bug42991.result
index f5554563a18..40f505d8260 100644
--- a/mysql-test/suite/optimizer_unfixed_bugs/r/bug42991.result
+++ b/mysql-test/suite/optimizer_unfixed_bugs/r/bug42991.result
@@ -245,7 +245,7 @@ UNLOCK TABLES;
/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
-select * from `table5` where (col2 <= '6566-06-15' AND col24 <> 'd') group by `col83` order by `col83` desc ;
+select * from `table5` where (col2 <= '6566-06-15' AND (col24 < 'd' or col24 > 'd')) group by `col83` order by `col83` desc ;
col0 col1 col2 col3 col4 col5 col6 col7 col8 col9 col10 col11 col12 col13 col14 col15 col16 col17 col18 col19 col20 col21 col22 col23 col24 col25 col26 col27 col28 col29 col30 col31 col32 col33 col34 col35 col36 col37 col38 col39 col40 col41 col42 col43 col44 col45 col46 col47 col48 col49 col50 col51 col52 col53 col54 col55 col56 col57 col58 col59 col60 col61 col62 col63 col64 col65 col66 col67 col68 col69 col70 col71 col72 col73 col74 col75 col76 col77 col78 col79 col80 col81 col82 col83 col84 col85 col86 col87 col88 col89 col90 col91 col92 col93 col94 col95 col96 col97 col98 col99 col100 col101 col102 col103 col104 col105 col106 col107 col108 col109 col110 col111 col112 col113 col114 col115 col116 col117 col118 col119 col120 col121 col122 col123 col124 col125 col126 col127 col128 col129 col130 col131 col132 col133 col134 col135 col136 col137 col138 col139 col140 col141 col142 col143 col144 col145 col146 col147 col148 col149 col150 col151 col152 col153 col154 col155 col156 col157 col158 col159 col160 col161 col162 col163 col164 col165 col166 col167 col168 col169 col170 col171 col172 col173 col174 col175
Warnings:
Warning 1292 Truncated incorrect DOUBLE value: 'd'
diff --git a/mysql-test/suite/optimizer_unfixed_bugs/t/bug42991.test b/mysql-test/suite/optimizer_unfixed_bugs/t/bug42991.test
index d59e9e1fbeb..9e8e5c4b3cd 100644
--- a/mysql-test/suite/optimizer_unfixed_bugs/t/bug42991.test
+++ b/mysql-test/suite/optimizer_unfixed_bugs/t/bug42991.test
@@ -242,9 +242,9 @@ UNLOCK TABLES;
/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
-#explain select * from `table5` where (col2 <= '6566-06-15' AND col24 <> 'd') group by `col83` order by `col83` desc ;
+#explain select * from `table5` where (col2 <= '6566-06-15' AND (col24 < 'd' or col24 > 'd')) group by `col83` order by `col83` desc ;
-select * from `table5` where (col2 <= '6566-06-15' AND col24 <> 'd') group by `col83` order by `col83` desc ;
+select * from `table5` where (col2 <= '6566-06-15' AND (col24 < 'd' or col24 > 'd')) group by `col83` order by `col83` desc ;
drop table `table5`;
SET debug_dbug= @saved_dbug;
diff --git a/mysql-test/suite/perfschema/r/schema.result b/mysql-test/suite/perfschema/r/schema.result
index 1f331394df6..8ce4cad4f4b 100644
--- a/mysql-test/suite/perfschema/r/schema.result
+++ b/mysql-test/suite/perfschema/r/schema.result
@@ -62,7 +62,7 @@ users
show create table accounts;
Table Create Table
accounts CREATE TABLE `accounts` (
- `USER` char(16) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL,
+ `USER` char(128) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL,
`HOST` char(60) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL,
`CURRENT_CONNECTIONS` bigint(20) NOT NULL,
`TOTAL_CONNECTIONS` bigint(20) NOT NULL
@@ -140,7 +140,7 @@ events_stages_summary_by_thread_by_event_name CREATE TABLE `events_stages_summar
show create table events_stages_summary_by_user_by_event_name;
Table Create Table
events_stages_summary_by_user_by_event_name CREATE TABLE `events_stages_summary_by_user_by_event_name` (
- `USER` char(16) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL,
+ `USER` char(128) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL,
`EVENT_NAME` varchar(128) NOT NULL,
`COUNT_STAR` bigint(20) unsigned NOT NULL,
`SUM_TIMER_WAIT` bigint(20) unsigned NOT NULL,
@@ -151,7 +151,7 @@ events_stages_summary_by_user_by_event_name CREATE TABLE `events_stages_summary_
show create table events_stages_summary_by_account_by_event_name;
Table Create Table
events_stages_summary_by_account_by_event_name CREATE TABLE `events_stages_summary_by_account_by_event_name` (
- `USER` char(16) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL,
+ `USER` char(128) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL,
`HOST` char(60) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL,
`EVENT_NAME` varchar(128) NOT NULL,
`COUNT_STAR` bigint(20) unsigned NOT NULL,
@@ -398,7 +398,7 @@ events_statements_summary_by_thread_by_event_name CREATE TABLE `events_statement
show create table events_statements_summary_by_user_by_event_name;
Table Create Table
events_statements_summary_by_user_by_event_name CREATE TABLE `events_statements_summary_by_user_by_event_name` (
- `USER` char(16) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL,
+ `USER` char(128) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL,
`EVENT_NAME` varchar(128) NOT NULL,
`COUNT_STAR` bigint(20) unsigned NOT NULL,
`SUM_TIMER_WAIT` bigint(20) unsigned NOT NULL,
@@ -428,7 +428,7 @@ events_statements_summary_by_user_by_event_name CREATE TABLE `events_statements_
show create table events_statements_summary_by_account_by_event_name;
Table Create Table
events_statements_summary_by_account_by_event_name CREATE TABLE `events_statements_summary_by_account_by_event_name` (
- `USER` char(16) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL,
+ `USER` char(128) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL,
`HOST` char(60) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL,
`EVENT_NAME` varchar(128) NOT NULL,
`COUNT_STAR` bigint(20) unsigned NOT NULL,
@@ -590,7 +590,7 @@ events_waits_summary_by_thread_by_event_name CREATE TABLE `events_waits_summary_
show create table events_waits_summary_by_user_by_event_name;
Table Create Table
events_waits_summary_by_user_by_event_name CREATE TABLE `events_waits_summary_by_user_by_event_name` (
- `USER` char(16) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL,
+ `USER` char(128) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL,
`EVENT_NAME` varchar(128) NOT NULL,
`COUNT_STAR` bigint(20) unsigned NOT NULL,
`SUM_TIMER_WAIT` bigint(20) unsigned NOT NULL,
@@ -601,7 +601,7 @@ events_waits_summary_by_user_by_event_name CREATE TABLE `events_waits_summary_by
show create table events_waits_summary_by_account_by_event_name;
Table Create Table
events_waits_summary_by_account_by_event_name CREATE TABLE `events_waits_summary_by_account_by_event_name` (
- `USER` char(16) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL,
+ `USER` char(128) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL,
`HOST` char(60) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL,
`EVENT_NAME` varchar(128) NOT NULL,
`COUNT_STAR` bigint(20) unsigned NOT NULL,
@@ -762,8 +762,8 @@ show create table setup_actors;
Table Create Table
setup_actors CREATE TABLE `setup_actors` (
`HOST` char(60) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '%',
- `USER` char(16) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '%',
- `ROLE` char(16) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '%'
+ `USER` char(128) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '%',
+ `ROLE` char(128) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '%'
) ENGINE=PERFORMANCE_SCHEMA DEFAULT CHARSET=utf8
show create table setup_consumers;
Table Create Table
@@ -1028,7 +1028,7 @@ threads CREATE TABLE `threads` (
`NAME` varchar(128) NOT NULL,
`TYPE` varchar(10) NOT NULL,
`PROCESSLIST_ID` bigint(20) unsigned DEFAULT NULL,
- `PROCESSLIST_USER` varchar(16) DEFAULT NULL,
+ `PROCESSLIST_USER` varchar(128) DEFAULT NULL,
`PROCESSLIST_HOST` varchar(60) DEFAULT NULL,
`PROCESSLIST_DB` varchar(64) DEFAULT NULL,
`PROCESSLIST_COMMAND` varchar(16) DEFAULT NULL,
@@ -1042,7 +1042,7 @@ threads CREATE TABLE `threads` (
show create table users;
Table Create Table
users CREATE TABLE `users` (
- `USER` char(16) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL,
+ `USER` char(128) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL,
`CURRENT_CONNECTIONS` bigint(20) NOT NULL,
`TOTAL_CONNECTIONS` bigint(20) NOT NULL
) ENGINE=PERFORMANCE_SCHEMA DEFAULT CHARSET=utf8
diff --git a/mysql-test/suite/perfschema/r/table_schema.result b/mysql-test/suite/perfschema/r/table_schema.result
index 8caf2017fd2..b7c138c54c7 100644
--- a/mysql-test/suite/perfschema/r/table_schema.result
+++ b/mysql-test/suite/perfschema/r/table_schema.result
@@ -1,7 +1,7 @@
select * from information_schema.columns where table_schema="performance_schema"
order by table_name, ordinal_position;
TABLE_CATALOG TABLE_SCHEMA TABLE_NAME COLUMN_NAME ORDINAL_POSITION COLUMN_DEFAULT IS_NULLABLE DATA_TYPE CHARACTER_MAXIMUM_LENGTH CHARACTER_OCTET_LENGTH NUMERIC_PRECISION NUMERIC_SCALE DATETIME_PRECISION CHARACTER_SET_NAME COLLATION_NAME COLUMN_TYPE COLUMN_KEY EXTRA PRIVILEGES COLUMN_COMMENT IS_GENERATED GENERATION_EXPRESSION
-def performance_schema accounts USER 1 NULL YES char 16 48 NULL NULL NULL utf8 utf8_bin char(16) select,insert,update,references NEVER NULL
+def performance_schema accounts USER 1 NULL YES char 128 384 NULL NULL NULL utf8 utf8_bin char(128) select,insert,update,references NEVER NULL
def performance_schema accounts HOST 2 NULL YES char 60 180 NULL NULL NULL utf8 utf8_bin char(60) select,insert,update,references NEVER NULL
def performance_schema accounts CURRENT_CONNECTIONS 3 NULL NO bigint NULL NULL 19 0 NULL NULL NULL bigint(20) select,insert,update,references NEVER NULL
def performance_schema accounts TOTAL_CONNECTIONS 4 NULL NO bigint NULL NULL 19 0 NULL NULL NULL bigint(20) select,insert,update,references NEVER NULL
@@ -37,7 +37,7 @@ def performance_schema events_stages_history_long TIMER_END 7 NULL YES bigint NU
def performance_schema events_stages_history_long TIMER_WAIT 8 NULL YES bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL
def performance_schema events_stages_history_long NESTING_EVENT_ID 9 NULL YES bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL
def performance_schema events_stages_history_long NESTING_EVENT_TYPE 10 NULL YES enum 9 27 NULL NULL NULL utf8 utf8_general_ci enum('STATEMENT','STAGE','WAIT') select,insert,update,references NEVER NULL
-def performance_schema events_stages_summary_by_account_by_event_name USER 1 NULL YES char 16 48 NULL NULL NULL utf8 utf8_bin char(16) select,insert,update,references NEVER NULL
+def performance_schema events_stages_summary_by_account_by_event_name USER 1 NULL YES char 128 384 NULL NULL NULL utf8 utf8_bin char(128) select,insert,update,references NEVER NULL
def performance_schema events_stages_summary_by_account_by_event_name HOST 2 NULL YES char 60 180 NULL NULL NULL utf8 utf8_bin char(60) select,insert,update,references NEVER NULL
def performance_schema events_stages_summary_by_account_by_event_name EVENT_NAME 3 NULL NO varchar 128 384 NULL NULL NULL utf8 utf8_general_ci varchar(128) select,insert,update,references NEVER NULL
def performance_schema events_stages_summary_by_account_by_event_name COUNT_STAR 4 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL
@@ -59,7 +59,7 @@ def performance_schema events_stages_summary_by_thread_by_event_name SUM_TIMER_W
def performance_schema events_stages_summary_by_thread_by_event_name MIN_TIMER_WAIT 5 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL
def performance_schema events_stages_summary_by_thread_by_event_name AVG_TIMER_WAIT 6 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL
def performance_schema events_stages_summary_by_thread_by_event_name MAX_TIMER_WAIT 7 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL
-def performance_schema events_stages_summary_by_user_by_event_name USER 1 NULL YES char 16 48 NULL NULL NULL utf8 utf8_bin char(16) select,insert,update,references NEVER NULL
+def performance_schema events_stages_summary_by_user_by_event_name USER 1 NULL YES char 128 384 NULL NULL NULL utf8 utf8_bin char(128) select,insert,update,references NEVER NULL
def performance_schema events_stages_summary_by_user_by_event_name EVENT_NAME 2 NULL NO varchar 128 384 NULL NULL NULL utf8 utf8_general_ci varchar(128) select,insert,update,references NEVER NULL
def performance_schema events_stages_summary_by_user_by_event_name COUNT_STAR 3 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL
def performance_schema events_stages_summary_by_user_by_event_name SUM_TIMER_WAIT 4 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL
@@ -192,7 +192,7 @@ def performance_schema events_statements_history_long NO_INDEX_USED 37 NULL NO b
def performance_schema events_statements_history_long NO_GOOD_INDEX_USED 38 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL
def performance_schema events_statements_history_long NESTING_EVENT_ID 39 NULL YES bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL
def performance_schema events_statements_history_long NESTING_EVENT_TYPE 40 NULL YES enum 9 27 NULL NULL NULL utf8 utf8_general_ci enum('STATEMENT','STAGE','WAIT') select,insert,update,references NEVER NULL
-def performance_schema events_statements_summary_by_account_by_event_name USER 1 NULL YES char 16 48 NULL NULL NULL utf8 utf8_bin char(16) select,insert,update,references NEVER NULL
+def performance_schema events_statements_summary_by_account_by_event_name USER 1 NULL YES char 128 384 NULL NULL NULL utf8 utf8_bin char(128) select,insert,update,references NEVER NULL
def performance_schema events_statements_summary_by_account_by_event_name HOST 2 NULL YES char 60 180 NULL NULL NULL utf8 utf8_bin char(60) select,insert,update,references NEVER NULL
def performance_schema events_statements_summary_by_account_by_event_name EVENT_NAME 3 NULL NO varchar 128 384 NULL NULL NULL utf8 utf8_general_ci varchar(128) select,insert,update,references NEVER NULL
def performance_schema events_statements_summary_by_account_by_event_name COUNT_STAR 4 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL
@@ -300,7 +300,7 @@ def performance_schema events_statements_summary_by_thread_by_event_name SUM_SOR
def performance_schema events_statements_summary_by_thread_by_event_name SUM_SORT_SCAN 24 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL
def performance_schema events_statements_summary_by_thread_by_event_name SUM_NO_INDEX_USED 25 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL
def performance_schema events_statements_summary_by_thread_by_event_name SUM_NO_GOOD_INDEX_USED 26 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL
-def performance_schema events_statements_summary_by_user_by_event_name USER 1 NULL YES char 16 48 NULL NULL NULL utf8 utf8_bin char(16) select,insert,update,references NEVER NULL
+def performance_schema events_statements_summary_by_user_by_event_name USER 1 NULL YES char 128 384 NULL NULL NULL utf8 utf8_bin char(128) select,insert,update,references NEVER NULL
def performance_schema events_statements_summary_by_user_by_event_name EVENT_NAME 2 NULL NO varchar 128 384 NULL NULL NULL utf8 utf8_general_ci varchar(128) select,insert,update,references NEVER NULL
def performance_schema events_statements_summary_by_user_by_event_name COUNT_STAR 3 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL
def performance_schema events_statements_summary_by_user_by_event_name SUM_TIMER_WAIT 4 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL
@@ -408,7 +408,7 @@ def performance_schema events_waits_history_long NESTING_EVENT_TYPE 16 NULL YES
def performance_schema events_waits_history_long OPERATION 17 NULL NO varchar 32 96 NULL NULL NULL utf8 utf8_general_ci varchar(32) select,insert,update,references NEVER NULL
def performance_schema events_waits_history_long NUMBER_OF_BYTES 18 NULL YES bigint NULL NULL 19 0 NULL NULL NULL bigint(20) select,insert,update,references NEVER NULL
def performance_schema events_waits_history_long FLAGS 19 NULL YES int NULL NULL 10 0 NULL NULL NULL int(10) unsigned select,insert,update,references NEVER NULL
-def performance_schema events_waits_summary_by_account_by_event_name USER 1 NULL YES char 16 48 NULL NULL NULL utf8 utf8_bin char(16) select,insert,update,references NEVER NULL
+def performance_schema events_waits_summary_by_account_by_event_name USER 1 NULL YES char 128 384 NULL NULL NULL utf8 utf8_bin char(128) select,insert,update,references NEVER NULL
def performance_schema events_waits_summary_by_account_by_event_name HOST 2 NULL YES char 60 180 NULL NULL NULL utf8 utf8_bin char(60) select,insert,update,references NEVER NULL
def performance_schema events_waits_summary_by_account_by_event_name EVENT_NAME 3 NULL NO varchar 128 384 NULL NULL NULL utf8 utf8_general_ci varchar(128) select,insert,update,references NEVER NULL
def performance_schema events_waits_summary_by_account_by_event_name COUNT_STAR 4 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL
@@ -437,7 +437,7 @@ def performance_schema events_waits_summary_by_thread_by_event_name SUM_TIMER_WA
def performance_schema events_waits_summary_by_thread_by_event_name MIN_TIMER_WAIT 5 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL
def performance_schema events_waits_summary_by_thread_by_event_name AVG_TIMER_WAIT 6 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL
def performance_schema events_waits_summary_by_thread_by_event_name MAX_TIMER_WAIT 7 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL
-def performance_schema events_waits_summary_by_user_by_event_name USER 1 NULL YES char 16 48 NULL NULL NULL utf8 utf8_bin char(16) select,insert,update,references NEVER NULL
+def performance_schema events_waits_summary_by_user_by_event_name USER 1 NULL YES char 128 384 NULL NULL NULL utf8 utf8_bin char(128) select,insert,update,references NEVER NULL
def performance_schema events_waits_summary_by_user_by_event_name EVENT_NAME 2 NULL NO varchar 128 384 NULL NULL NULL utf8 utf8_general_ci varchar(128) select,insert,update,references NEVER NULL
def performance_schema events_waits_summary_by_user_by_event_name COUNT_STAR 3 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL
def performance_schema events_waits_summary_by_user_by_event_name SUM_TIMER_WAIT 4 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL
@@ -561,8 +561,8 @@ def performance_schema session_connect_attrs ATTR_NAME 2 NULL NO varchar 32 96 N
def performance_schema session_connect_attrs ATTR_VALUE 3 NULL YES varchar 1024 3072 NULL NULL NULL utf8 utf8_bin varchar(1024) select,insert,update,references NEVER NULL
def performance_schema session_connect_attrs ORDINAL_POSITION 4 NULL YES int NULL NULL 10 0 NULL NULL NULL int(11) select,insert,update,references NEVER NULL
def performance_schema setup_actors HOST 1 '%' NO char 60 180 NULL NULL NULL utf8 utf8_bin char(60) select,insert,update,references NEVER NULL
-def performance_schema setup_actors USER 2 '%' NO char 16 48 NULL NULL NULL utf8 utf8_bin char(16) select,insert,update,references NEVER NULL
-def performance_schema setup_actors ROLE 3 '%' NO char 16 48 NULL NULL NULL utf8 utf8_bin char(16) select,insert,update,references NEVER NULL
+def performance_schema setup_actors USER 2 '%' NO char 128 384 NULL NULL NULL utf8 utf8_bin char(128) select,insert,update,references NEVER NULL
+def performance_schema setup_actors ROLE 3 '%' NO char 128 384 NULL NULL NULL utf8 utf8_bin char(128) select,insert,update,references NEVER NULL
def performance_schema setup_consumers NAME 1 NULL NO varchar 64 192 NULL NULL NULL utf8 utf8_general_ci varchar(64) select,insert,update,references NEVER NULL
def performance_schema setup_consumers ENABLED 2 NULL NO enum 3 9 NULL NULL NULL utf8 utf8_general_ci enum('YES','NO') select,insert,update,references NEVER NULL
def performance_schema setup_instruments NAME 1 NULL NO varchar 128 384 NULL NULL NULL utf8 utf8_general_ci varchar(128) select,insert,update,references NEVER NULL
@@ -783,7 +783,7 @@ def performance_schema threads THREAD_ID 1 NULL NO bigint NULL NULL 20 0 NULL NU
def performance_schema threads NAME 2 NULL NO varchar 128 384 NULL NULL NULL utf8 utf8_general_ci varchar(128) select,insert,update,references NEVER NULL
def performance_schema threads TYPE 3 NULL NO varchar 10 30 NULL NULL NULL utf8 utf8_general_ci varchar(10) select,insert,update,references NEVER NULL
def performance_schema threads PROCESSLIST_ID 4 NULL YES bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL
-def performance_schema threads PROCESSLIST_USER 5 NULL YES varchar 16 48 NULL NULL NULL utf8 utf8_general_ci varchar(16) select,insert,update,references NEVER NULL
+def performance_schema threads PROCESSLIST_USER 5 NULL YES varchar 128 384 NULL NULL NULL utf8 utf8_general_ci varchar(128) select,insert,update,references NEVER NULL
def performance_schema threads PROCESSLIST_HOST 6 NULL YES varchar 60 180 NULL NULL NULL utf8 utf8_general_ci varchar(60) select,insert,update,references NEVER NULL
def performance_schema threads PROCESSLIST_DB 7 NULL YES varchar 64 192 NULL NULL NULL utf8 utf8_general_ci varchar(64) select,insert,update,references NEVER NULL
def performance_schema threads PROCESSLIST_COMMAND 8 NULL YES varchar 16 48 NULL NULL NULL utf8 utf8_general_ci varchar(16) select,insert,update,references NEVER NULL
@@ -793,7 +793,7 @@ def performance_schema threads PROCESSLIST_INFO 11 NULL YES longtext 4294967295
def performance_schema threads PARENT_THREAD_ID 12 NULL YES bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL
def performance_schema threads ROLE 13 NULL YES varchar 64 192 NULL NULL NULL utf8 utf8_general_ci varchar(64) select,insert,update,references NEVER NULL
def performance_schema threads INSTRUMENTED 14 NULL NO enum 3 9 NULL NULL NULL utf8 utf8_general_ci enum('YES','NO') select,insert,update,references NEVER NULL
-def performance_schema users USER 1 NULL YES char 16 48 NULL NULL NULL utf8 utf8_bin char(16) select,insert,update,references NEVER NULL
+def performance_schema users USER 1 NULL YES char 128 384 NULL NULL NULL utf8 utf8_bin char(128) select,insert,update,references NEVER NULL
def performance_schema users CURRENT_CONNECTIONS 2 NULL NO bigint NULL NULL 19 0 NULL NULL NULL bigint(20) select,insert,update,references NEVER NULL
def performance_schema users TOTAL_CONNECTIONS 3 NULL NO bigint NULL NULL 19 0 NULL NULL NULL bigint(20) select,insert,update,references NEVER NULL
select count(*) from information_schema.columns
diff --git a/mysql-test/suite/perfschema/r/threads_mysql.result b/mysql-test/suite/perfschema/r/threads_mysql.result
index 31f91fc6464..40e6360fec7 100644
--- a/mysql-test/suite/perfschema/r/threads_mysql.result
+++ b/mysql-test/suite/perfschema/r/threads_mysql.result
@@ -17,6 +17,16 @@ processlist_info NULL
unified_parent_thread_id NULL
role NULL
instrumented YES
+name thread/sql/manager
+type BACKGROUND
+processlist_user NULL
+processlist_host NULL
+processlist_db NULL
+processlist_command NULL
+processlist_info NULL
+unified_parent_thread_id unified parent_thread_id
+role NULL
+instrumented YES
name thread/sql/one_connection
type FOREGROUND
processlist_user root
@@ -44,16 +54,6 @@ processlist_info NULL
unified_parent_thread_id unified parent_thread_id
role NULL
instrumented YES
-name thread/sql/slave_background
-type BACKGROUND
-processlist_user NULL
-processlist_host NULL
-processlist_db NULL
-processlist_command NULL
-processlist_info NULL
-unified_parent_thread_id unified parent_thread_id
-role NULL
-instrumented YES
CREATE TEMPORARY TABLE t1 AS
SELECT thread_id FROM performance_schema.threads
WHERE name LIKE 'thread/sql%';
@@ -113,7 +113,7 @@ WHERE t1.name LIKE 'thread/sql%'
ORDER BY parent_thread_name, child_thread_name;
parent_thread_name child_thread_name
thread/sql/event_scheduler thread/sql/event_worker
+thread/sql/main thread/sql/manager
thread/sql/main thread/sql/one_connection
thread/sql/main thread/sql/signal_handler
-thread/sql/main thread/sql/slave_background
thread/sql/one_connection thread/sql/event_scheduler
diff --git a/mysql-test/suite/plugins/r/server_audit.result b/mysql-test/suite/plugins/r/server_audit.result
index ab726a0e22f..40c07805315 100644
--- a/mysql-test/suite/plugins/r/server_audit.result
+++ b/mysql-test/suite/plugins/r/server_audit.result
@@ -118,6 +118,7 @@ CREATE USER u1 IDENTIFIED BY 'pwd-123';
GRANT ALL ON sa_db TO u2 IDENTIFIED BY "pwd-321";
SET PASSWORD FOR u1 = PASSWORD('pwd 098');
CREATE USER u3 IDENTIFIED BY '';
+ALTER USER u3 IDENTIFIED BY 'pwd-456';
drop user u1, u2, u3;
set global server_audit_events='query_ddl';
create table t1(id int);
@@ -139,6 +140,10 @@ select 2;
2
2
drop table t1;
+create procedure pr1() insert into test.t1 values ("foo", 42);
+create function fn1(i int) returns int deterministic return i+1;
+drop procedure pr1;
+drop function fn1;
set global server_audit_events='query_ddl,query_dml';
create table t1(id int);
insert into t1 values (1), (2);
@@ -210,6 +215,14 @@ select 2;
2
2
drop table t1;
+create procedure pr1() insert into test.t1 values ("foo", 42);
+create function fn1(i int) returns int deterministic return i+1;
+drop procedure pr1;
+drop function fn1;
+create procedure pr1() insert into test.t1 values ("foo", 42);
+create function fn1(i int) returns int deterministic return i+1;
+drop procedure pr1;
+drop function fn1;
set global server_audit_events='table';
set global server_audit_incl_users='user1';
create user user1@localhost;
@@ -227,6 +240,7 @@ set global server_audit_logging= on;
disconnect cn1;
drop user user1@localhost;
set global server_audit_events='';
+set global server_audit_incl_users='root, plug_dest';
CREATE USER plug IDENTIFIED WITH 'test_plugin_server' AS 'plug_dest';
CREATE USER plug_dest IDENTIFIED BY 'plug_dest_passwd';
connect(localhost,plug,plug_dest,test,MYSQL_PORT,MYSQL_SOCK);
@@ -277,7 +291,7 @@ server_audit_file_path
server_audit_file_rotate_now OFF
server_audit_file_rotate_size 1000000
server_audit_file_rotations 9
-server_audit_incl_users root
+server_audit_incl_users root, plug_dest
server_audit_logging ON
server_audit_mode 1
server_audit_output_type file
@@ -392,6 +406,8 @@ TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,proxies_priv,
TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,roles_mapping,
TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,global_priv,
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'CREATE USER u3 IDENTIFIED BY *****',0
+TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,global_priv,
+TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'ALTER USER u3 IDENTIFIED BY *****',0
TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,db,
TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,tables_priv,
TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,columns_priv,
@@ -402,6 +418,10 @@ TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,global_priv,
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'drop user u1, u2, u3',0
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'create table t1(id int)',0
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'drop table t1',0
+TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'create procedure pr1() insert into test.t1 values ("foo", 42)',0
+TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'create function fn1(i int) returns int deterministic return i+1',0
+TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'drop procedure pr1',0
+TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'drop function fn1',0
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'create table t1(id int)',0
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'insert into t1 values (1), (2)',0
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'select * from t1',0
@@ -431,6 +451,7 @@ TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,proxies_priv,
TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,roles_mapping,
TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,global_priv,
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'set global server_audit_events=\'\'',0
+TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'set global server_audit_incl_users=\'root, plug_dest\'',0
TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,db,
TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,tables_priv,
TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,columns_priv,
@@ -452,8 +473,9 @@ TIME,HOSTNAME,plug,localhost,ID,0,DISCONNECT,,,0
TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,proxies_priv,
TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,global_priv,
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'GRANT PROXY ON plug_dest TO plug',0
-TIME,HOSTNAME,plug,localhost,ID,0,PROXY_CONNECT,test,`plug_dest`@`%`,0
TIME,HOSTNAME,plug,localhost,ID,0,CONNECT,test,,0
+TIME,HOSTNAME,plug,localhost,ID,0,PROXY_CONNECT,test,`plug_dest`@`%`,0
+TIME,HOSTNAME,plug,localhost,ID,ID,QUERY,test,'select USER(),CURRENT_USER()',0
TIME,HOSTNAME,plug,localhost,ID,0,DISCONNECT,test,,0
TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,db,
TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,tables_priv,
diff --git a/mysql-test/suite/plugins/t/multiauth.test b/mysql-test/suite/plugins/t/multiauth.test
index cc6fc0c8644..4cf44fc7c65 100644
--- a/mysql-test/suite/plugins/t/multiauth.test
+++ b/mysql-test/suite/plugins/t/multiauth.test
@@ -1,3 +1,4 @@
+--source include/not_ubsan.inc
#
# MDEV-11340 Allow multiple alternative authentication methods for the same user
#
diff --git a/mysql-test/suite/plugins/t/server_audit.test b/mysql-test/suite/plugins/t/server_audit.test
index 787541f7ca0..01da49756d0 100644
--- a/mysql-test/suite/plugins/t/server_audit.test
+++ b/mysql-test/suite/plugins/t/server_audit.test
@@ -95,6 +95,7 @@ CREATE USER u1 IDENTIFIED BY 'pwd-123';
GRANT ALL ON sa_db TO u2 IDENTIFIED BY "pwd-321";
SET PASSWORD FOR u1 = PASSWORD('pwd 098');
CREATE USER u3 IDENTIFIED BY '';
+ALTER USER u3 IDENTIFIED BY 'pwd-456';
drop user u1, u2, u3;
set global server_audit_events='query_ddl';
@@ -106,6 +107,12 @@ select 2;
/*! select 2*/;
/*comment*/ select 2;
drop table t1;
+
+create procedure pr1() insert into test.t1 values ("foo", 42);
+create function fn1(i int) returns int deterministic return i+1;
+drop procedure pr1;
+drop function fn1;
+
set global server_audit_events='query_ddl,query_dml';
create table t1(id int);
insert into t1 values (1), (2);
@@ -145,6 +152,15 @@ insert into t1 values (1), (2);
select * from t1;
select 2;
drop table t1;
+create procedure pr1() insert into test.t1 values ("foo", 42);
+create function fn1(i int) returns int deterministic return i+1;
+drop procedure pr1;
+drop function fn1;
+
+create procedure pr1() insert into test.t1 values ("foo", 42);
+create function fn1(i int) returns int deterministic return i+1;
+drop procedure pr1;
+drop function fn1;
set global server_audit_events='table';
set global server_audit_incl_users='user1';
@@ -173,6 +189,7 @@ source include/wait_until_count_sessions.inc;
drop user user1@localhost;
set global server_audit_events='';
+set global server_audit_incl_users='root, plug_dest';
CREATE USER plug IDENTIFIED WITH 'test_plugin_server' AS 'plug_dest';
CREATE USER plug_dest IDENTIFIED BY 'plug_dest_passwd';
diff --git a/mysql-test/suite/rpl/disabled.def b/mysql-test/suite/rpl/disabled.def
index 89140a19ac8..1ed8651da17 100644
--- a/mysql-test/suite/rpl/disabled.def
+++ b/mysql-test/suite/rpl/disabled.def
@@ -10,8 +10,7 @@
#
##############################################################################
-rpl_spec_variables : BUG#11755836 2009-10-27 jasonh rpl_spec_variables fails on PB2 hpux
-#rpl_get_master_version_and_clock : Bug#11766137 Jan 05 2011 joro Valgrind warnings rpl_get_master_version_and_clock
+#rpl_get_master_version_and_clock : Bug#11766137 Jan 05 2011 joro Valgrind warnings
rpl_partition_archive : MDEV-5077 2013-09-27 svoj Cannot exchange partition with archive table
rpl_row_binlog_max_cache_size : MDEV-11092
rpl_row_index_choice : MDEV-11666
diff --git a/mysql-test/suite/rpl/include/rpl_binlog_max_cache_size.test b/mysql-test/suite/rpl/include/rpl_binlog_max_cache_size.test
index 0f46b00f683..4c93ad86209 100644
--- a/mysql-test/suite/rpl/include/rpl_binlog_max_cache_size.test
+++ b/mysql-test/suite/rpl/include/rpl_binlog_max_cache_size.test
@@ -49,14 +49,14 @@ connection master;
--echo *** Single statement on transactional table ***
--disable_query_log
---error ER_TRANS_CACHE_FULL, ER_STMT_CACHE_FULL, ER_ERROR_ON_WRITE
+--error ER_TRANS_CACHE_FULL, ER_STMT_CACHE_FULL, ER_ERROR_ON_WRITE, 1534
eval INSERT INTO t1 (a, data) VALUES (1,
CONCAT($data, $data, $data, $data, $data));
--enable_query_log
--echo *** Single statement on non-transactional table ***
--disable_query_log
---error ER_TRANS_CACHE_FULL, ER_STMT_CACHE_FULL, ER_ERROR_ON_WRITE
+--error ER_TRANS_CACHE_FULL, ER_STMT_CACHE_FULL, ER_ERROR_ON_WRITE, 1534
eval INSERT INTO t2 (a, data) VALUES (2,
CONCAT($data, $data, $data, $data, $data, $data));
--enable_query_log
diff --git a/mysql-test/suite/rpl/include/rpl_semi_sync.inc b/mysql-test/suite/rpl/include/rpl_semi_sync.inc
index 393b49372e1..c3cd918b5fc 100644
--- a/mysql-test/suite/rpl/include/rpl_semi_sync.inc
+++ b/mysql-test/suite/rpl/include/rpl_semi_sync.inc
@@ -9,7 +9,6 @@ source include/have_innodb.inc;
source include/master-slave.inc;
let $engine_type= InnoDB;
-#let $engine_type= MyISAM;
# Suppress warnings that might be generated during the test
connection master;
@@ -94,7 +93,6 @@ enable_query_log;
echo [ status of semi-sync on master should be OFF ];
show status like 'Rpl_semi_sync_master_clients';
show status like 'Rpl_semi_sync_master_status';
---replace_result 305 304
show status like 'Rpl_semi_sync_master_yes_tx';
# reset master to make sure the following test will start with a clean environment
@@ -201,16 +199,23 @@ connection slave;
source include/stop_slave.inc;
connection master;
+--source include/kill_binlog_dump_threads.inc
set global rpl_semi_sync_master_timeout= 5000;
# The first semi-sync check should be on because after slave stop,
# there are no transactions on the master.
echo [ master status should be ON ];
-show status like 'Rpl_semi_sync_master_status';
+
+let $status_var= Rpl_semi_sync_master_status;
+let $status_var_value= ON;
+source include/wait_for_status_var.inc;
+
+let $status_var= Rpl_semi_sync_master_clients;
+let $status_var_value= 0;
+source include/wait_for_status_var.inc;
+
show status like 'Rpl_semi_sync_master_no_tx';
---replace_result 305 304
show status like 'Rpl_semi_sync_master_yes_tx';
-show status like 'Rpl_semi_sync_master_clients';
echo [ semi-sync replication of these transactions will fail ];
insert into t1 values (500);
@@ -225,7 +230,6 @@ source include/wait_for_status_var.inc;
echo [ master status should be OFF ];
show status like 'Rpl_semi_sync_master_status';
show status like 'Rpl_semi_sync_master_no_tx';
---replace_result 305 304
show status like 'Rpl_semi_sync_master_yes_tx';
# Semi-sync status on master is now OFF, so all these transactions
@@ -246,7 +250,6 @@ insert into t1 values (100);
echo [ master status should be OFF ];
show status like 'Rpl_semi_sync_master_status';
show status like 'Rpl_semi_sync_master_no_tx';
---replace_result 305 304
show status like 'Rpl_semi_sync_master_yes_tx';
--echo #
@@ -274,9 +277,11 @@ connection master;
# The master semi-sync status should be on again after slave catches up.
echo [ master status should be ON again after slave catches up ];
-show status like 'Rpl_semi_sync_master_status';
+
+let $status_var= Rpl_semi_sync_master_status;
+let $status_var_value= ON;
+source include/wait_for_status_var.inc;
show status like 'Rpl_semi_sync_master_no_tx';
---replace_result 305 304
show status like 'Rpl_semi_sync_master_yes_tx';
show status like 'Rpl_semi_sync_master_clients';
@@ -332,11 +337,7 @@ replace_result $engine_type ENGINE_TYPE;
eval create table t1 (a int) engine = $engine_type;
drop table t1;
-##show status like 'Rpl_semi_sync_master_status';
-
sync_slave_with_master;
---replace_column 2 #
-show status like 'Rpl_relay%';
echo [ test reset master ];
connection master;
@@ -353,19 +354,7 @@ source include/stop_slave.inc;
reset slave;
# Kill the dump thread on master for previous slave connection and
-# wait for it to exit
-connection master;
-let $_tid= `select id from information_schema.processlist where command = 'Binlog Dump' limit 1`;
-if ($_tid)
-{
- --replace_result $_tid _tid
- eval kill query $_tid;
-
- # After dump thread exit, Rpl_semi_sync_master_clients will be 0
- let $status_var= Rpl_semi_sync_master_clients;
- let $status_var_value= 0;
- source include/wait_for_status_var.inc;
-}
+--source include/kill_binlog_dump_threads.inc
connection slave;
source include/start_slave.inc;
@@ -404,17 +393,7 @@ connection master;
reset master;
# Kill the dump thread on master for previous slave connection and wait for it to exit
-let $_tid= `select id from information_schema.processlist where command = 'Binlog Dump' limit 1`;
-if ($_tid)
-{
- --replace_result $_tid _tid
- eval kill query $_tid;
-
- # After dump thread exit, Rpl_semi_sync_master_clients will be 0
- let $status_var= Rpl_semi_sync_master_clients;
- let $status_var_value= 0;
- source include/wait_for_status_var.inc;
-}
+--source include/kill_binlog_dump_threads.inc
# Do not binlog the following statement because it will generate
# different events for ROW and STATEMENT format
@@ -459,21 +438,16 @@ SHOW STATUS LIKE 'Rpl_semi_sync_slave_status';
connection master;
# Kill the dump thread on master for previous slave connection and wait for it to exit
-let $_tid= `select id from information_schema.processlist where command = 'Binlog Dump' limit 1`;
-if ($_tid)
-{
- --replace_result $_tid _tid
- eval kill query $_tid;
-
- # After dump thread exit, Rpl_semi_sync_master_clients will be 0
- let $status_var= Rpl_semi_sync_master_clients;
- let $status_var_value= 0;
- source include/wait_for_status_var.inc;
-}
+--source include/kill_binlog_dump_threads.inc
echo [ Semi-sync status on master should be ON ];
-show status like 'Rpl_semi_sync_master_clients';
+let $status_var= Rpl_semi_sync_master_clients;
+let $status_var_value= 0;
+source include/wait_for_status_var.inc;
show status like 'Rpl_semi_sync_master_status';
+let $status_var= Rpl_semi_sync_master_status;
+let $status_var_value= ON;
+source include/wait_for_status_var.inc;
set global rpl_semi_sync_master_enabled= 0;
connection slave;
diff --git a/mysql-test/suite/rpl/r/rpl_change_master.result b/mysql-test/suite/rpl/r/rpl_change_master.result
index 5439bdef72c..48cec72d917 100644
--- a/mysql-test/suite/rpl/r/rpl_change_master.result
+++ b/mysql-test/suite/rpl/r/rpl_change_master.result
@@ -22,4 +22,8 @@ n
connection master;
drop table t1;
connection slave;
+connection master;
+CHANGE MASTER TO MASTER_USER='root', MASTER_SSL=0, MASTER_SSL_CA='', MASTER_SSL_CERT='',
+MASTER_SSL_KEY='', MASTER_SSL_CRL='', MASTER_SSL_CRLPATH='';
+CHANGE MASTER TO MASTER_USER='root', MASTER_PASSWORD='', MASTER_SSL=0;
include/rpl_end.inc
diff --git a/mysql-test/suite/rpl/r/rpl_ignore_grant.result b/mysql-test/suite/rpl/r/rpl_ignore_grant.result
index 64da944a5dc..0a5564ac6c0 100644
--- a/mysql-test/suite/rpl/r/rpl_ignore_grant.result
+++ b/mysql-test/suite/rpl/r/rpl_ignore_grant.result
@@ -1,14 +1,12 @@
include/master-slave.inc
[connection master]
connection master;
-set global sql_mode="";
-set local sql_mode="";
+set sql_mode="";
delete from mysql.user where user=_binary'rpl_ignore_grant';
delete from mysql.db where user=_binary'rpl_ignore_grant';
flush privileges;
connection slave;
-set global sql_mode="";
-set local sql_mode="";
+set sql_mode="";
delete from mysql.user where user=_binary'rpl_ignore_grant';
delete from mysql.db where user=_binary'rpl_ignore_grant';
flush privileges;
@@ -43,7 +41,5 @@ connection slave;
delete from mysql.user where user=_binary'rpl_ignore_grant';
delete from mysql.db where user=_binary'rpl_ignore_grant';
flush privileges;
-set global sql_mode=default;
connection master;
-set global sql_mode=default;
include/rpl_end.inc
diff --git a/mysql-test/suite/rpl/r/rpl_master_pos_wait.result b/mysql-test/suite/rpl/r/rpl_master_pos_wait.result
index 04f55fc1263..aae3418b546 100644
--- a/mysql-test/suite/rpl/r/rpl_master_pos_wait.result
+++ b/mysql-test/suite/rpl/r/rpl_master_pos_wait.result
@@ -12,6 +12,7 @@ Note 1003 select master_pos_wait('master-bin.999999',0,2) AS `master_pos_wait('m
select master_pos_wait('master-bin.999999',0);
connection slave1;
stop slave sql_thread;
+include/wait_for_slave_sql_to_stop.inc
connection slave;
master_pos_wait('master-bin.999999',0)
NULL
diff --git a/mysql-test/suite/rpl/r/rpl_relay_max_extension.result b/mysql-test/suite/rpl/r/rpl_relay_max_extension.result
new file mode 100644
index 00000000000..4444398203e
--- /dev/null
+++ b/mysql-test/suite/rpl/r/rpl_relay_max_extension.result
@@ -0,0 +1,37 @@
+include/rpl_init.inc [topology=1->2]
+connection server_2;
+include/stop_slave.inc
+RESET SLAVE;
+include/start_slave.inc
+include/stop_slave.inc
+#
+# Stop slave server
+#
+#
+# Simulate file number get close to 999997
+# by renaming relay logs and modifying index/info files
+#
+# Restart slave server
+#
+SET @save_slave_parallel_threads= @@GLOBAL.slave_parallel_threads;
+SET @save_max_relay_log_size= @@GLOBAL.max_relay_log_size;
+SET GLOBAL slave_parallel_threads=1;
+SET GLOBAL max_relay_log_size=100 * 1024;
+include/start_slave.inc
+connection server_1;
+create table t1 (i int, c varchar(1024));
+#
+# Insert some data to generate enough amount of binary logs
+#
+connection server_2;
+#
+# Assert that 'slave-relay-bin.999999' is purged.
+#
+NOT FOUND /slave-relay-bin.999999/ in slave-relay-bin.index
+include/stop_slave.inc
+SET GLOBAL slave_parallel_threads= @save_slave_parallel_threads;
+SET GLOBAL max_relay_log_size= @save_max_relay_log_size;
+include/start_slave.inc
+connection server_1;
+DROP TABLE t1;
+include/rpl_end.inc
diff --git a/mysql-test/suite/rpl/r/rpl_row_USER.result b/mysql-test/suite/rpl/r/rpl_row_USER.result
index 5a59bb57795..2771c674f44 100644
--- a/mysql-test/suite/rpl/r/rpl_row_USER.result
+++ b/mysql-test/suite/rpl/r/rpl_row_USER.result
@@ -1,6 +1,5 @@
include/master-slave.inc
[connection master]
-set global sql_mode="";
set local sql_mode="";
connection master;
DROP DATABASE IF EXISTS mysqltest1;
@@ -40,7 +39,6 @@ REVOKE ALL ON mysqltest1.* FROM 'tester'@'%';
REVOKE ALL ON mysqltest1.* FROM ''@'localhost%';
DROP USER tester@'%';
DROP USER ''@'localhost%';
+FLUSH PRIVILEGES;
connection slave;
include/rpl_end.inc
-set global sql_mode=default;
-set local sql_mode=default;
diff --git a/mysql-test/suite/rpl/r/rpl_row_utf32.result b/mysql-test/suite/rpl/r/rpl_row_utf32.result
index af6e709860e..6d177b7cda0 100644
--- a/mysql-test/suite/rpl/r/rpl_row_utf32.result
+++ b/mysql-test/suite/rpl/r/rpl_row_utf32.result
@@ -3,7 +3,7 @@ include/master-slave.inc
SET SQL_LOG_BIN=0;
CREATE TABLE t1 (c1 char(255) DEFAULT NULL, KEY c1 (c1)) DEFAULT CHARSET=utf32;
Warnings:
-Warning 1071 Specified key was too long; max key length is 1000 bytes
+Note 1071 Specified key was too long; max key length is 1000 bytes
SET SQL_LOG_BIN=1;
connection slave;
SET @saved_slave_type_conversions= @@global.slave_type_conversions;
@@ -13,7 +13,7 @@ include/start_slave.inc
SET SQL_LOG_BIN=0;
CREATE TABLE t1 ( c1 varchar(255) DEFAULT NULL, KEY c1 (c1)) DEFAULT CHARSET=utf32;
Warnings:
-Warning 1071 Specified key was too long; max key length is 1000 bytes
+Note 1071 Specified key was too long; max key length is 1000 bytes
SET SQL_LOG_BIN=1;
connection master;
INSERT INTO t1(c1) VALUES ('insert into t1');
diff --git a/mysql-test/suite/rpl/r/rpl_row_vcol_crash.result b/mysql-test/suite/rpl/r/rpl_row_vcol_crash.result
new file mode 100644
index 00000000000..f76d8935fa8
--- /dev/null
+++ b/mysql-test/suite/rpl/r/rpl_row_vcol_crash.result
@@ -0,0 +1,380 @@
+include/master-slave.inc
+[connection master]
+#
+# Test case 1: KEY on a virtual column with ON DELETE CASCADE
+#
+CREATE TABLE t1 (id INT NOT NULL PRIMARY KEY) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1),(2),(3);
+CREATE TABLE t2 (id INT NOT NULL PRIMARY KEY,
+t1_id INT NOT NULL,
+v_col INT AS (t1_id+1) VIRTUAL, KEY (v_col), KEY (t1_id),
+CONSTRAINT a FOREIGN KEY (t1_id) REFERENCES t1 (id) ON DELETE CASCADE
+) ENGINE=InnoDB;
+INSERT INTO t2 VALUES (90,1,NULL);
+INSERT INTO t2 VALUES (91,2,default);
+DELETE FROM t1 WHERE id=1;
+connection slave;
+#
+# Verify data consistency on slave
+#
+include/diff_tables.inc [master:test.t1, slave:test.t1]
+include/diff_tables.inc [master:test.t2, slave:test.t2]
+connection master;
+DROP TABLE t2,t1;
+connection slave;
+#
+# Test case 2: Verify "ON DELETE CASCADE" for parent->child->child scenario
+# Parent table: users
+# Child tables: matchmaking_groups, matchmaking_group_users
+# Parent table: matchmaking_groups
+# Child tables: matchmaking_group_users, matchmaking_group_maps
+#
+# Deleting a row from parent table should be reflected in
+# child tables.
+# matchmaking_groups->matchmaking_group_users->matchmaking_group_maps
+# users->matchmaking_group_users->matchmaking_group_maps
+#
+connection master;
+CREATE TABLE users (id INT UNSIGNED AUTO_INCREMENT PRIMARY KEY,
+name VARCHAR(32) NOT NULL DEFAULT ''
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+CREATE TABLE matchmaking_groups (
+id BIGINT UNSIGNED AUTO_INCREMENT PRIMARY KEY,
+host_user_id INT UNSIGNED NOT NULL UNIQUE,
+v_col INT AS (host_user_id+1) VIRTUAL, KEY (v_col),
+CONSTRAINT FOREIGN KEY (host_user_id) REFERENCES users (id)
+ON DELETE CASCADE ON UPDATE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+CREATE TABLE matchmaking_group_users (
+matchmaking_group_id BIGINT UNSIGNED NOT NULL,
+user_id INT UNSIGNED NOT NULL,
+v_col1 int as (user_id+1) virtual, KEY (v_col1),
+PRIMARY KEY (matchmaking_group_id,user_id),
+UNIQUE KEY user_id (user_id),
+CONSTRAINT FOREIGN KEY (matchmaking_group_id)
+REFERENCES matchmaking_groups (id) ON DELETE CASCADE ON UPDATE CASCADE,
+CONSTRAINT FOREIGN KEY (user_id)
+REFERENCES users (id) ON DELETE CASCADE ON UPDATE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+CREATE TABLE matchmaking_group_maps (
+matchmaking_group_id BIGINT UNSIGNED NOT NULL,
+map_id TINYINT UNSIGNED NOT NULL,
+v_col2 INT AS (map_id+1) VIRTUAL, KEY (v_col2),
+PRIMARY KEY (matchmaking_group_id,map_id),
+CONSTRAINT FOREIGN KEY (matchmaking_group_id)
+REFERENCES matchmaking_groups (id) ON DELETE CASCADE ON UPDATE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+connection slave;
+connection master;
+INSERT INTO users VALUES (NULL,'foo'),(NULL,'bar');
+INSERT INTO matchmaking_groups VALUES (10,1,default),(11,2,default);
+INSERT INTO matchmaking_group_users VALUES (10,1,default),(11,2,default);
+INSERT INTO matchmaking_group_maps VALUES (10,55,default),(11,66,default);
+DELETE FROM matchmaking_groups WHERE id = 10;
+connection slave;
+#
+# No rows should be returned as ON DELETE CASCADE should have removed
+# corresponding rows from child tables. There should not be any mismatch
+# of the 'id' field between parent->child.
+#
+SELECT * FROM matchmaking_group_users WHERE matchmaking_group_id NOT IN (SELECT id FROM matchmaking_groups);
+matchmaking_group_id user_id v_col1
+SELECT * FROM matchmaking_group_maps WHERE matchmaking_group_id NOT IN (SELECT id FROM matchmaking_groups);
+matchmaking_group_id map_id v_col2
+#
+# Rows with id=11 should be present
+#
+SELECT * FROM matchmaking_group_users;
+matchmaking_group_id user_id v_col1
+11 2 3
+SELECT * FROM matchmaking_group_maps;
+matchmaking_group_id map_id v_col2
+11 66 67
+connection master;
+DELETE FROM users WHERE id = 2;
+connection slave;
+#
+# No rows should be present in both the child tables
+#
+SELECT * FROM matchmaking_group_users;
+matchmaking_group_id user_id v_col1
+SELECT * FROM matchmaking_group_maps;
+matchmaking_group_id map_id v_col2
+connection master;
+DROP TABLE matchmaking_group_maps, matchmaking_group_users, matchmaking_groups, users;
+connection slave;
+#
+# Test case 3: KEY on a virtual column with ON UPDATE CASCADE
+#
+connection master;
+CREATE TABLE t1 (a INT NOT NULL PRIMARY KEY, b INT NOT NULL) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1, 80);
+CREATE TABLE t2 (a INT KEY, b INT,
+v_col int as (b+1) virtual, KEY (v_col),
+CONSTRAINT b FOREIGN KEY (b) REFERENCES t1(a) ON UPDATE CASCADE
+) ENGINE=InnoDB;
+INSERT INTO t2 VALUES (51, 1, default);
+connection slave;
+connection master;
+UPDATE t1 SET a = 50 WHERE a = 1;
+#
+# Master: Verify that ON UPDATE CASCADE works fine
+# old_row: (51, 1, 2) ON UPDATE New_row: (51, 50, 51)
+#
+SELECT * FROM t2 WHERE b=50;
+a b v_col
+51 50 51
+connection slave;
+#
+# Slave: Verify that ON UPDATE CASCADE works fine
+# old_row: (51, 1, 2) ON UPDATE New_row: (51, 50, 51)
+#
+SELECT * FROM t2 WHERE b=50;
+a b v_col
+51 50 51
+connection master;
+DROP TABLE t2, t1;
+connection slave;
+#
+# Test case 4: Define triggers on master, their results should be
+# replicated as part of row events and they should be
+# applied on slave with the default
+# slave_run_triggers_for_rbr=NO
+#
+connection master;
+CREATE TABLE t1 (id INT NOT NULL PRIMARY KEY) ENGINE=InnoDB;
+CREATE TABLE t2 (count INT NOT NULL) ENGINE=InnoDB;
+CREATE TRIGGER trg AFTER INSERT ON t1 FOR EACH ROW INSERT INTO t2 VALUES (1);
+INSERT INTO t1 VALUES (2),(3);
+connection slave;
+SHOW GLOBAL VARIABLES LIKE 'slave_run_triggers_for_rbr';
+Variable_name Value
+slave_run_triggers_for_rbr NO
+#
+# As two rows are inserted in table 't1', two rows should get inserted
+# into table 't2' as part of trigger.
+#
+include/assert.inc [Table t2 should have two rows.]
+connection master;
+DROP TABLE t1,t2;
+connection slave;
+#
+# Test case 5: Define triggers + Foreign Keys on master, their results
+# should be replicated as part of row events and master
+# and slave should be in sync.
+#
+connection master;
+CREATE TABLE t1 (id INT NOT NULL PRIMARY KEY) ENGINE=InnoDB;
+CREATE TABLE t2 (t1_id INT NOT NULL,
+v_col INT AS (t1_id+1) VIRTUAL, KEY (v_col), KEY (t1_id),
+CONSTRAINT a FOREIGN KEY (t1_id) REFERENCES t1 (id) ON DELETE CASCADE
+) ENGINE=InnoDB;
+CREATE TABLE t3 (count INT NOT NULL) ENGINE=InnoDB;
+CREATE TRIGGER trg AFTER INSERT ON t1 FOR EACH ROW INSERT INTO t3 VALUES (1);
+INSERT INTO t1 VALUES (2),(3);
+INSERT INTO t2 VALUES (2, default), (3, default);
+connection slave;
+#
+# As two rows are inserted in table 't1', two rows should get inserted
+# into table 't3' as part of trigger.
+#
+include/assert.inc [Table t3 should have two rows.]
+#
+# Verify ON DELETE CASCADE correctness
+#
+connection master;
+DELETE FROM t1 WHERE id=2;
+connection slave;
+connection master;
+include/diff_tables.inc [master:test.t1, slave:test.t1]
+include/diff_tables.inc [master:test.t2, slave:test.t2]
+include/diff_tables.inc [master:test.t3, slave:test.t3]
+DROP TABLE t3,t2,t1;
+connection slave;
+#
+# Test case 6: Triggers are present only on slave and
+# 'slave_run_triggers_for_rbr=NO'
+#
+connection slave;
+SET @save_slave_run_triggers_for_rbr= @@GLOBAL.slave_run_triggers_for_rbr;
+SET GLOBAL slave_run_triggers_for_rbr= NO;;
+SHOW GLOBAL VARIABLES LIKE '%slave_run_triggers_for_rbr%';
+Variable_name Value
+slave_run_triggers_for_rbr NO
+connection master;
+CREATE TABLE t1 (id INT NOT NULL PRIMARY KEY) ENGINE=InnoDB;
+CREATE TABLE t2 (t1_id INT NOT NULL,
+v_col INT AS (t1_id+1) VIRTUAL, KEY (v_col),
+KEY (t1_id), CONSTRAINT a FOREIGN KEY (t1_id) REFERENCES t1 (id) ON DELETE CASCADE
+) ENGINE=InnoDB;
+CREATE TABLE t3 (count INT NOT NULL) ENGINE=InnoDB;
+connection slave;
+CREATE TRIGGER trg AFTER INSERT ON t2 FOR EACH ROW INSERT INTO t3 VALUES (1);
+connection master;
+INSERT INTO t1 VALUES (2),(3);
+INSERT INTO t2 VALUES (2, default), (3, default);
+connection slave;
+#
+# Count must be 0
+#
+include/assert.inc [Table t3 should have zero rows.]
+connection master;
+DELETE FROM t1 WHERE id=2;
+connection slave;
+SET GLOBAL slave_run_triggers_for_rbr= @save_slave_run_triggers_for_rbr;
+#
+# Verify t1, t2 are consistent on slave.
+#
+include/diff_tables.inc [master:test.t1, slave:test.t1]
+include/diff_tables.inc [master:test.t2, slave:test.t2]
+connection master;
+DROP TABLE t3,t2,t1;
+connection slave;
+#
+# Test case 7: Triggers are present only on slave and
+# 'slave_run_triggers_for_rbr=YES'
+#
+connection slave;
+SET @save_slave_run_triggers_for_rbr= @@GLOBAL.slave_run_triggers_for_rbr;
+SET GLOBAL slave_run_triggers_for_rbr= YES;;
+SHOW GLOBAL VARIABLES LIKE '%slave_run_triggers_for_rbr%';
+Variable_name Value
+slave_run_triggers_for_rbr YES
+connection master;
+CREATE TABLE t1 (id INT NOT NULL PRIMARY KEY) ENGINE=InnoDB;
+CREATE TABLE t2 (t1_id INT NOT NULL,
+v_col INT AS (t1_id+1) VIRTUAL, KEY (v_col),
+KEY (t1_id), CONSTRAINT a FOREIGN KEY (t1_id) REFERENCES t1 (id) ON DELETE CASCADE
+) ENGINE=InnoDB;
+CREATE TABLE t3 (count INT NOT NULL) ENGINE=InnoDB;
+connection slave;
+CREATE TRIGGER trg AFTER INSERT ON t2 FOR EACH ROW INSERT INTO t3 VALUES (1);
+connection master;
+INSERT INTO t1 VALUES (2),(3);
+INSERT INTO t2 VALUES (2, default), (3, default);
+connection slave;
+#
+# Count must be 2
+#
+include/assert.inc [Table t3 should have two rows.]
+connection master;
+DELETE FROM t1 WHERE id=2;
+connection slave;
+SET GLOBAL slave_run_triggers_for_rbr= @save_slave_run_triggers_for_rbr;
+#
+# Verify t1, t2 are consistent on slave.
+#
+include/diff_tables.inc [master:test.t1, slave:test.t1]
+include/diff_tables.inc [master:test.t2, slave:test.t2]
+connection master;
+DROP TABLE t3,t2,t1;
+connection slave;
+#
+# Test case 8: Triggers and Foreign Keys are present only on slave and
+# 'slave_run_triggers_for_rbr=NO'
+#
+connection slave;
+SET @save_slave_run_triggers_for_rbr= @@GLOBAL.slave_run_triggers_for_rbr;
+SET GLOBAL slave_run_triggers_for_rbr= NO;;
+SHOW GLOBAL VARIABLES LIKE '%slave_run_triggers_for_rbr%';
+Variable_name Value
+slave_run_triggers_for_rbr NO
+connection master;
+CREATE TABLE t1 (id INT NOT NULL PRIMARY KEY) ENGINE=InnoDB;
+SET sql_log_bin=0;
+CREATE TABLE t2 (t1_id INT NOT NULL,v_col INT AS (t1_id+1) VIRTUAL) ENGINE=INNODB;
+SET sql_log_bin=1;
+CREATE TABLE t3 (count INT NOT NULL) ENGINE=InnoDB;
+connection slave;
+CREATE TABLE t2 (t1_id INT NOT NULL,
+v_col INT AS (t1_id+1) VIRTUAL, KEY (v_col), KEY (t1_id),
+CONSTRAINT a FOREIGN KEY (t1_id) REFERENCES t1 (id) ON DELETE CASCADE
+) ENGINE=InnoDB;
+CREATE TRIGGER trg AFTER INSERT ON t2 FOR EACH ROW INSERT INTO t3 VALUES (1);
+connection master;
+INSERT INTO t1 VALUES (2),(3);
+INSERT INTO t2 VALUES (2, default), (3, default);
+connection slave;
+#
+# Count must be 0
+#
+include/assert.inc [Table t3 should have zero rows.]
+connection master;
+DELETE FROM t1 WHERE id=2;
+# t1: Should have one row
+SELECT * FROM t1;
+id
+3
+# t2: Should have two rows
+SELECT * FROM t2;
+t1_id v_col
+2 3
+3 4
+connection slave;
+# t1: Should have one row
+SELECT * FROM t1;
+id
+3
+# t2: Should have one row on slave due to ON DELETE CASCADE
+SELECT * FROM t2;
+t1_id v_col
+3 4
+SET GLOBAL slave_run_triggers_for_rbr= @save_slave_run_triggers_for_rbr;
+connection master;
+DROP TABLE t3,t2,t1;
+connection slave;
+#
+# Test case 9: Triggers and Foreign Keys are present only on slave and
+# 'slave_run_triggers_for_rbr=YES'
+#
+connection slave;
+SET @save_slave_run_triggers_for_rbr= @@GLOBAL.slave_run_triggers_for_rbr;
+SET GLOBAL slave_run_triggers_for_rbr= YES;;
+SHOW GLOBAL VARIABLES LIKE '%slave_run_triggers_for_rbr%';
+Variable_name Value
+slave_run_triggers_for_rbr YES
+connection master;
+CREATE TABLE t1 (id INT NOT NULL PRIMARY KEY) ENGINE=InnoDB;
+SET sql_log_bin=0;
+CREATE TABLE t2 (t1_id INT NOT NULL,v_col INT AS (t1_id+1) VIRTUAL) ENGINE=INNODB;
+SET sql_log_bin=1;
+CREATE TABLE t3 (count INT NOT NULL) ENGINE=InnoDB;
+connection slave;
+CREATE TABLE t2 (t1_id INT NOT NULL,
+v_col INT AS (t1_id+1) VIRTUAL, KEY (v_col), KEY (t1_id),
+CONSTRAINT a FOREIGN KEY (t1_id) REFERENCES t1 (id) ON DELETE CASCADE
+) ENGINE=InnoDB;
+CREATE TRIGGER trg AFTER INSERT ON t2 FOR EACH ROW INSERT INTO t3 VALUES (1);
+connection master;
+INSERT INTO t1 VALUES (2),(3);
+INSERT INTO t2 VALUES (2, default), (3, default);
+connection slave;
+#
+# Count must be 2
+#
+include/assert.inc [Table t3 should have two rows.]
+connection master;
+DELETE FROM t1 WHERE id=2;
+# t1: Should have one row
+SELECT * FROM t1;
+id
+3
+# t2: Should have two rows
+SELECT * FROM t2;
+t1_id v_col
+2 3
+3 4
+connection slave;
+# t1: Should have one row
+SELECT * FROM t1;
+id
+3
+# t2: Should have one row on slave due to ON DELETE CASCADE
+SELECT * FROM t2;
+t1_id v_col
+3 4
+SET GLOBAL slave_run_triggers_for_rbr= @save_slave_run_triggers_for_rbr;
+connection master;
+DROP TABLE t3,t2,t1;
+connection slave;
+include/rpl_end.inc
diff --git a/mysql-test/suite/rpl/r/rpl_semi_sync.result b/mysql-test/suite/rpl/r/rpl_semi_sync.result
index 106efb555d3..d18bd1efda7 100644
--- a/mysql-test/suite/rpl/r/rpl_semi_sync.result
+++ b/mysql-test/suite/rpl/r/rpl_semi_sync.result
@@ -164,20 +164,15 @@ connection slave;
connection slave;
include/stop_slave.inc
connection master;
+include/kill_binlog_dump_threads.inc
set global rpl_semi_sync_master_timeout= 5000;
[ master status should be ON ]
-show status like 'Rpl_semi_sync_master_status';
-Variable_name Value
-Rpl_semi_sync_master_status ON
show status like 'Rpl_semi_sync_master_no_tx';
Variable_name Value
Rpl_semi_sync_master_no_tx 0
show status like 'Rpl_semi_sync_master_yes_tx';
Variable_name Value
Rpl_semi_sync_master_yes_tx 14
-show status like 'Rpl_semi_sync_master_clients';
-Variable_name Value
-Rpl_semi_sync_master_clients 0
[ semi-sync replication of these transactions will fail ]
insert into t1 values (500);
[ master status should be OFF ]
@@ -235,9 +230,6 @@ max(a)
500
connection master;
[ master status should be ON again after slave catches up ]
-show status like 'Rpl_semi_sync_master_status';
-Variable_name Value
-Rpl_semi_sync_master_status ON
show status like 'Rpl_semi_sync_master_no_tx';
Variable_name Value
Rpl_semi_sync_master_no_tx 12
@@ -304,8 +296,6 @@ connection master;
create table t1 (a int) engine = ENGINE_TYPE;
drop table t1;
connection slave;
-show status like 'Rpl_relay%';
-Variable_name Value
[ test reset master ]
connection master;
reset master;
@@ -321,7 +311,7 @@ Rpl_semi_sync_master_yes_tx 0
connection slave;
include/stop_slave.inc
reset slave;
-connection master;
+include/kill_binlog_dump_threads.inc
connection slave;
include/start_slave.inc
connection master;
@@ -353,6 +343,7 @@ include/stop_slave.inc
reset slave;
connection master;
reset master;
+include/kill_binlog_dump_threads.inc
set sql_log_bin=0;
grant replication slave on *.* to rpl@127.0.0.1 identified by 'rpl_password';
flush privileges;
@@ -403,10 +394,8 @@ SHOW STATUS LIKE 'Rpl_semi_sync_slave_status';
Variable_name Value
Rpl_semi_sync_slave_status OFF
connection master;
+include/kill_binlog_dump_threads.inc
[ Semi-sync status on master should be ON ]
-show status like 'Rpl_semi_sync_master_clients';
-Variable_name Value
-Rpl_semi_sync_master_clients 0
show status like 'Rpl_semi_sync_master_status';
Variable_name Value
Rpl_semi_sync_master_status ON
diff --git a/mysql-test/suite/rpl/r/rpl_semi_sync_after_sync.result b/mysql-test/suite/rpl/r/rpl_semi_sync_after_sync.result
index c61340f3967..f2240817489 100644
--- a/mysql-test/suite/rpl/r/rpl_semi_sync_after_sync.result
+++ b/mysql-test/suite/rpl/r/rpl_semi_sync_after_sync.result
@@ -165,20 +165,15 @@ connection slave;
connection slave;
include/stop_slave.inc
connection master;
+include/kill_binlog_dump_threads.inc
set global rpl_semi_sync_master_timeout= 5000;
[ master status should be ON ]
-show status like 'Rpl_semi_sync_master_status';
-Variable_name Value
-Rpl_semi_sync_master_status ON
show status like 'Rpl_semi_sync_master_no_tx';
Variable_name Value
Rpl_semi_sync_master_no_tx 0
show status like 'Rpl_semi_sync_master_yes_tx';
Variable_name Value
Rpl_semi_sync_master_yes_tx 16
-show status like 'Rpl_semi_sync_master_clients';
-Variable_name Value
-Rpl_semi_sync_master_clients 0
[ semi-sync replication of these transactions will fail ]
insert into t1 values (500);
[ master status should be OFF ]
@@ -236,9 +231,6 @@ max(a)
500
connection master;
[ master status should be ON again after slave catches up ]
-show status like 'Rpl_semi_sync_master_status';
-Variable_name Value
-Rpl_semi_sync_master_status ON
show status like 'Rpl_semi_sync_master_no_tx';
Variable_name Value
Rpl_semi_sync_master_no_tx 12
@@ -305,8 +297,6 @@ connection master;
create table t1 (a int) engine = ENGINE_TYPE;
drop table t1;
connection slave;
-show status like 'Rpl_relay%';
-Variable_name Value
[ test reset master ]
connection master;
reset master;
@@ -322,7 +312,7 @@ Rpl_semi_sync_master_yes_tx 0
connection slave;
include/stop_slave.inc
reset slave;
-connection master;
+include/kill_binlog_dump_threads.inc
connection slave;
include/start_slave.inc
connection master;
@@ -354,6 +344,7 @@ include/stop_slave.inc
reset slave;
connection master;
reset master;
+include/kill_binlog_dump_threads.inc
set sql_log_bin=0;
grant replication slave on *.* to rpl@127.0.0.1 identified by 'rpl_password';
flush privileges;
@@ -404,10 +395,8 @@ SHOW STATUS LIKE 'Rpl_semi_sync_slave_status';
Variable_name Value
Rpl_semi_sync_slave_status OFF
connection master;
+include/kill_binlog_dump_threads.inc
[ Semi-sync status on master should be ON ]
-show status like 'Rpl_semi_sync_master_clients';
-Variable_name Value
-Rpl_semi_sync_master_clients 0
show status like 'Rpl_semi_sync_master_status';
Variable_name Value
Rpl_semi_sync_master_status ON
diff --git a/mysql-test/suite/rpl/r/rpl_semi_sync_after_sync_row.result b/mysql-test/suite/rpl/r/rpl_semi_sync_after_sync_row.result
index 6a23f24b66d..fcced801d65 100644
--- a/mysql-test/suite/rpl/r/rpl_semi_sync_after_sync_row.result
+++ b/mysql-test/suite/rpl/r/rpl_semi_sync_after_sync_row.result
@@ -165,20 +165,15 @@ connection slave;
connection slave;
include/stop_slave.inc
connection master;
+include/kill_binlog_dump_threads.inc
set global rpl_semi_sync_master_timeout= 5000;
[ master status should be ON ]
-show status like 'Rpl_semi_sync_master_status';
-Variable_name Value
-Rpl_semi_sync_master_status ON
show status like 'Rpl_semi_sync_master_no_tx';
Variable_name Value
Rpl_semi_sync_master_no_tx 0
show status like 'Rpl_semi_sync_master_yes_tx';
Variable_name Value
Rpl_semi_sync_master_yes_tx 14
-show status like 'Rpl_semi_sync_master_clients';
-Variable_name Value
-Rpl_semi_sync_master_clients 0
[ semi-sync replication of these transactions will fail ]
insert into t1 values (500);
[ master status should be OFF ]
@@ -236,9 +231,6 @@ max(a)
500
connection master;
[ master status should be ON again after slave catches up ]
-show status like 'Rpl_semi_sync_master_status';
-Variable_name Value
-Rpl_semi_sync_master_status ON
show status like 'Rpl_semi_sync_master_no_tx';
Variable_name Value
Rpl_semi_sync_master_no_tx 12
@@ -305,8 +297,6 @@ connection master;
create table t1 (a int) engine = ENGINE_TYPE;
drop table t1;
connection slave;
-show status like 'Rpl_relay%';
-Variable_name Value
[ test reset master ]
connection master;
reset master;
@@ -322,7 +312,7 @@ Rpl_semi_sync_master_yes_tx 0
connection slave;
include/stop_slave.inc
reset slave;
-connection master;
+include/kill_binlog_dump_threads.inc
connection slave;
include/start_slave.inc
connection master;
@@ -354,6 +344,7 @@ include/stop_slave.inc
reset slave;
connection master;
reset master;
+include/kill_binlog_dump_threads.inc
set sql_log_bin=0;
grant replication slave on *.* to rpl@127.0.0.1 identified by 'rpl_password';
flush privileges;
@@ -404,10 +395,8 @@ SHOW STATUS LIKE 'Rpl_semi_sync_slave_status';
Variable_name Value
Rpl_semi_sync_slave_status OFF
connection master;
+include/kill_binlog_dump_threads.inc
[ Semi-sync status on master should be ON ]
-show status like 'Rpl_semi_sync_master_clients';
-Variable_name Value
-Rpl_semi_sync_master_clients 0
show status like 'Rpl_semi_sync_master_status';
Variable_name Value
Rpl_semi_sync_master_status ON
diff --git a/mysql-test/suite/rpl/r/rpl_semi_sync_slave_compressed_protocol.result b/mysql-test/suite/rpl/r/rpl_semi_sync_slave_compressed_protocol.result
new file mode 100644
index 00000000000..b0fe083f928
--- /dev/null
+++ b/mysql-test/suite/rpl/r/rpl_semi_sync_slave_compressed_protocol.result
@@ -0,0 +1,19 @@
+include/master-slave.inc
+[connection master]
+SET @@GLOBAL.rpl_semi_sync_master_enabled = 1;
+connection slave;
+include/stop_slave.inc
+SET @@GLOBAL.rpl_semi_sync_slave_enabled = 1;
+include/start_slave.inc
+connection master;
+CREATE TABLE t1 (i INT);
+DROP TABLE t1;
+include/rpl_sync.inc
+include/assert_grep.inc [Check that there is no 'Read semi-sync reply magic number error' in error log.]
+connection master;
+SET @@GLOBAL. rpl_semi_sync_master_enabled = $sav_enabled_master;
+connection slave;
+include/stop_slave.inc
+SET @@GLOBAL. rpl_semi_sync_slave_enabled = $sav_enabled_slave;
+include/start_slave.inc
+include/rpl_end.inc
diff --git a/mysql-test/suite/rpl/r/rpl_sp.result b/mysql-test/suite/rpl/r/rpl_sp.result
index 686380427b5..5216a3b0a44 100644
--- a/mysql-test/suite/rpl/r/rpl_sp.result
+++ b/mysql-test/suite/rpl/r/rpl_sp.result
@@ -1,7 +1,6 @@
include/master-slave.inc
[connection master]
set local sql_mode='';
-set global sql_mode='';
drop database if exists mysqltest1;
create database mysqltest1;
use mysqltest1;
@@ -175,16 +174,16 @@ fn1(20)
insert into t2 values(fn1(21));
select * from t1;
a
-21
20
+21
select * from t2;
a
23
connection slave;
select * from t1;
a
-21
20
+21
select * from t2;
a
23
@@ -243,7 +242,7 @@ return unix_timestamp();
end NONE
mysqltest1 fn2 FUNCTION fn2 SQL NO_SQL NO DEFINER int(11) begin
return unix_timestamp();
-end zedjzlcsjhd@localhost # # latin1 latin1_swedish_ci latin1_swedish_ci begin
+end zedjzlcsjhd@localhost # # STRICT_TRANS_TABLES,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION latin1 latin1_swedish_ci latin1_swedish_ci begin
return unix_timestamp();
end NONE
mysqltest1 fn3 FUNCTION fn3 SQL READS_SQL_DATA NO DEFINER int(11) begin
@@ -268,7 +267,7 @@ return unix_timestamp();
end NONE
mysqltest1 fn2 FUNCTION fn2 SQL NO_SQL NO DEFINER int(11) begin
return unix_timestamp();
-end zedjzlcsjhd@localhost # # latin1 latin1_swedish_ci latin1_swedish_ci begin
+end zedjzlcsjhd@localhost # # STRICT_TRANS_TABLES,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION latin1 latin1_swedish_ci latin1_swedish_ci begin
return unix_timestamp();
end NONE
mysqltest1 fn3 FUNCTION fn3 SQL READS_SQL_DATA NO DEFINER int(11) begin
@@ -865,6 +864,7 @@ SET TIMESTAMP=t/*!*/;
grant SELECT, INSERT on mysqltest1.t2 to "zedjzlcsjhd"@127.0.0.1
/*!*/;
SET TIMESTAMP=t/*!*/;
+SET @@session.sql_mode=1411383296/*!*/;
CREATE DEFINER=`zedjzlcsjhd`@`127.0.0.1` PROCEDURE `foo4`()
DETERMINISTIC
begin
@@ -883,6 +883,7 @@ COMMIT
START TRANSACTION
/*!*/;
SET TIMESTAMP=t/*!*/;
+SET @@session.sql_mode=0/*!*/;
insert into t1 values (15)
/*!*/;
SET TIMESTAMP=t/*!*/;
@@ -891,17 +892,20 @@ COMMIT
START TRANSACTION
/*!*/;
SET TIMESTAMP=t/*!*/;
+SET @@session.sql_mode=1411383296/*!*/;
insert into t2 values(3)
/*!*/;
SET TIMESTAMP=t/*!*/;
COMMIT
/*!*/;
SET TIMESTAMP=t/*!*/;
+SET @@session.sql_mode=0/*!*/;
alter procedure foo4 sql security invoker
/*!*/;
START TRANSACTION
/*!*/;
SET TIMESTAMP=t/*!*/;
+SET @@session.sql_mode=1411383296/*!*/;
insert into t2 values(3)
/*!*/;
SET TIMESTAMP=t/*!*/;
@@ -918,6 +922,7 @@ COMMIT
START TRANSACTION
/*!*/;
SET TIMESTAMP=t/*!*/;
+SET @@session.sql_mode=0/*!*/;
delete from t2
/*!*/;
SET TIMESTAMP=t/*!*/;
@@ -1015,6 +1020,7 @@ SET TIMESTAMP=t/*!*/;
COMMIT
/*!*/;
SET TIMESTAMP=t/*!*/;
+SET @@session.sql_mode=1411383296/*!*/;
CREATE DEFINER=`zedjzlcsjhd`@`127.0.0.1` FUNCTION `fn2`() RETURNS int(11)
NO SQL
begin
@@ -1022,6 +1028,7 @@ return unix_timestamp();
end
/*!*/;
SET TIMESTAMP=t/*!*/;
+SET @@session.sql_mode=0/*!*/;
CREATE DEFINER=`root`@`localhost` FUNCTION `fn3`() RETURNS int(11)
READS SQL DATA
begin
@@ -1384,4 +1391,3 @@ drop procedure p1;
# End of 5.5 tests.
connection slave;
include/rpl_end.inc
-set global sql_mode=default;
diff --git a/mysql-test/suite/rpl/r/rpl_spec_variables.result b/mysql-test/suite/rpl/r/rpl_spec_variables.result
index 96f63a50ea9..8b4c398f308 100644
--- a/mysql-test/suite/rpl/r/rpl_spec_variables.result
+++ b/mysql-test/suite/rpl/r/rpl_spec_variables.result
@@ -2,14 +2,17 @@ include/master-slave.inc
[connection master]
* auto_increment_increment, auto_increment_offset *
+connection master;
SET @@global.auto_increment_increment=2;
SET @@session.auto_increment_increment=2;
SET @@global.auto_increment_offset=10;
SET @@session.auto_increment_offset=10;
+connection slave;
SET @@global.auto_increment_increment=3;
SET @@session.auto_increment_increment=3;
SET @@global.auto_increment_offset=20;
SET @@session.auto_increment_offset=20;
+connection master;
CREATE TABLE t1 (a INT NOT NULL AUTO_INCREMENT PRIMARY KEY, b VARCHAR(10)) ENGINE=MyISAM;
INSERT INTO t1 (b) VALUES ('master');
INSERT INTO t1 (b) VALUES ('master');
@@ -17,6 +20,7 @@ SELECT * FROM t1 ORDER BY a;
a b
2 master
4 master
+connection slave;
CREATE TABLE t2 (a INT NOT NULL AUTO_INCREMENT PRIMARY KEY, b VARCHAR(10)) ENGINE=MyISAM;
INSERT INTO t1 (b) VALUES ('slave');
INSERT INTO t1 (b) VALUES ('slave');
@@ -32,102 +36,123 @@ SELECT * FROM t2 ORDER BY a;
a b
1 slave
4 slave
+connection master;
DROP TABLE IF EXISTS t1,t2;
SET @@global.auto_increment_increment=1;
SET @@session.auto_increment_increment=1;
SET @@global.auto_increment_offset=1;
SET @@session.auto_increment_offset=1;
+connection slave;
SET @@global.auto_increment_increment=1;
SET @@session.auto_increment_increment=1;
SET @@global.auto_increment_offset=1;
SET @@session.auto_increment_offset=1;
+connection slave;
SET auto_increment_increment=1;
SET auto_increment_offset=1;
* character_set_database, collation_server *
+connection master;
SET @restore_master_character_set_database=@@global.character_set_database;
SET @restore_master_collation_server=@@global.collation_server;
SET @@global.character_set_database=latin1;
SET @@session.character_set_database=latin1;
SET @@global.collation_server=latin1_german1_ci;
SET @@session.collation_server=latin1_german1_ci;
+connection slave;
SET @restore_slave_character_set_database=@@global.character_set_database;
SET @restore_slave_collation_server=@@global.collation_server;
SET @@global.character_set_database=utf8;
SET @@session.character_set_database=utf8;
SET @@global.collation_server=utf8_bin;
SET @@session.collation_server=utf8_bin;
+connection master;
CREATE TABLE t1 (a INT NOT NULL PRIMARY KEY, b VARCHAR(10)) ENGINE=MyISAM;
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`a` int(11) NOT NULL,
- `b` varchar(10) COLLATE latin1_german1_ci DEFAULT NULL,
+ `b` varchar(10) DEFAULT NULL,
PRIMARY KEY (`a`)
-) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_german1_ci
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+connection slave;
CREATE TABLE t2 (a INT NOT NULL PRIMARY KEY, b VARCHAR(10)) ENGINE=MyISAM;
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`a` int(11) NOT NULL,
- `b` varchar(10) COLLATE latin1_german1_ci DEFAULT NULL,
+ `b` varchar(10) DEFAULT NULL,
PRIMARY KEY (`a`)
-) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_german1_ci
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
SHOW CREATE TABLE t2;
Table Create Table
t2 CREATE TABLE `t2` (
`a` int(11) NOT NULL,
- `b` varchar(10) COLLATE utf8_bin DEFAULT NULL,
+ `b` varchar(10) DEFAULT NULL,
PRIMARY KEY (`a`)
-) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
SET @@global.collation_server=latin1_swedish_ci;
SET @@session.collation_server=latin1_swedish_ci;
+connection master;
SET @@global.collation_server=latin1_swedish_ci;
SET @@session.collation_server=latin1_swedish_ci;
DROP TABLE IF EXISTS t1,t2;
* default_week_format *
+connection master;
SET @@global.default_week_format=0;
SET @@session.default_week_format=0;
+connection slave;
SET @@global.default_week_format=1;
SET @@session.default_week_format=1;
+connection master;
CREATE TABLE t1 (a INT NOT NULL PRIMARY KEY, b VARCHAR(10), c INT) ENGINE=MyISAM;
INSERT INTO t1 VALUES (1, 'master ', WEEK('2008-01-07'));
SELECT * FROM t1 ORDER BY a;
a b c
1 master 1
+connection slave;
INSERT INTO t1 VALUES (2, 'slave ', WEEK('2008-01-07'));
SELECT * FROM t1 ORDER BY a;
a b c
1 master 1
2 slave 2
+connection master;
DROP TABLE t1;
+connection slave;
SET @@global.default_week_format=0;
SET @@session.default_week_format=0;
* local_infile *
+connection slave;
SET @@global.local_infile=0;
+connection master;
CREATE TABLE t1 (a INT NOT NULL AUTO_INCREMENT PRIMARY KEY, b VARCHAR(20), c CHAR(254)) ENGINE=MyISAM;
LOAD DATA LOCAL INFILE 'FILE' INTO TABLE t1 (b);
SELECT COUNT(*) FROM t1;
COUNT(*)
70
+connection slave;
LOAD DATA LOCAL INFILE 'FILE2' INTO TABLE t1 (b);
-ERROR 42000: The used command is not allowed with this MySQL version
+ERROR 42000: The used command is not allowed with this MariaDB version
SELECT COUNT(*) FROM t1;
COUNT(*)
70
SET @@global.local_infile=1;
+connection master;
DROP TABLE t1;
* max_heap_table_size *
+connection slave;
SET @restore_slave_max_heap_table_size=@@global.max_heap_table_size;
SET @@global.max_heap_table_size=16384;
SET @@session.max_heap_table_size=16384;
+connection master;
CREATE TABLE t1 (a INT NOT NULL AUTO_INCREMENT PRIMARY KEY, b VARCHAR(10), c CHAR(254)) ENGINE=MEMORY;
SELECT COUNT(*)=2000 FROM t1;
COUNT(*)=2000
1
+connection slave;
SELECT COUNT(*)=2000 FROM t1 WHERE b='master' GROUP BY b ORDER BY b;
COUNT(*)=2000
1
@@ -137,18 +162,24 @@ COUNT(*)<2000 AND COUNT(*)>0
SELECT COUNT(*)<2000 AND COUNT(*)>0 FROM t2 WHERE b='slave' GROUP BY b ORDER BY b;
COUNT(*)<2000 AND COUNT(*)>0
1
+connection master;
DROP TABLE IF EXISTS t1,t2;
* storage_engine *
+connection master;
SET @restore_master_storage_engine=@@global.storage_engine;
SET @@global.storage_engine=InnoDB;
SET @@session.storage_engine=InnoDB;
+connection slave;
SET @restore_slave_storage_engine=@@global.storage_engine;
SET @@global.storage_engine=Memory;
SET @@session.storage_engine=Memory;
+connection master;
CREATE TABLE t1 (a INT NOT NULL PRIMARY KEY, b VARCHAR(10));
CREATE TABLE t2 (a INT NOT NULL PRIMARY KEY, b VARCHAR(10)) ENGINE=InnoDB;
+connection slave;
CREATE TABLE t3 (a INT NOT NULL PRIMARY KEY, b VARCHAR(10));
+connection master;
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
@@ -163,6 +194,7 @@ t2 CREATE TABLE `t2` (
`b` varchar(10) DEFAULT NULL,
PRIMARY KEY (`a`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1
+connection slave;
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
@@ -186,37 +218,49 @@ t3 CREATE TABLE `t3` (
) ENGINE=MEMORY DEFAULT CHARSET=latin1
SET @@global.storage_engine=InnoDB;
SET @@session.storage_engine=InnoDB;
+connection master;
DROP TABLE IF EXISTS t1,t2,t3;
* sql_mode *
+connection master;
+SET @old_sql_mode_master= @@global.sql_mode;
SET @@global.sql_mode=ANSI;
SET @@session.sql_mode=ANSI;
+connection slave;
+SET @old_sql_mode_slave= @@global.sql_mode;
SET @@global.sql_mode=TRADITIONAL;
SET @@session.sql_mode=TRADITIONAL;
+connection master;
CREATE TABLE t1 (a INT NOT NULL PRIMARY KEY, b VARCHAR(10), c DATE);
INSERT INTO t1 VALUES (1, 'master', '0000-00-00');
SELECT * FROM t1 ORDER BY a;
a b c
1 master 0000-00-00
+connection slave;
INSERT INTO t1 VALUES (1, 'slave', '0000-00-00');
-ERROR 22007: Incorrect date value: '0000-00-00' for column 'c' at row 1
+ERROR 22007: Incorrect date value: '0000-00-00' for column `test`.`t1`.`c` at row 1
SELECT * FROM t1 ORDER BY a;
a b c
1 master 0000-00-00
SET @@global.sql_mode='';
SET @@session.sql_mode='';
+connection master;
SET @@global.sql_mode='';
SET @@session.sql_mode='';
DROP TABLE t1;
*** clean up ***
+connection master;
SET @@global.character_set_database=@restore_master_character_set_database;
SET @@global.collation_server=@restore_master_collation_server;
SET @@global.storage_engine=@restore_master_storage_engine;
+SET @@global.sql_mode=@old_sql_mode_master;
+connection slave;
SET @@global.character_set_database=@restore_slave_character_set_database;
SET @@global.collation_server=@restore_slave_collation_server;
SET @@global.max_heap_table_size=@restore_slave_max_heap_table_size;
SET @@global.storage_engine=@restore_slave_storage_engine;
+SET @@global.sql_mode=@old_sql_mode_slave;
call mtr.add_suppression("The table 't[12]' is full");
include/rpl_end.inc
diff --git a/mysql-test/suite/rpl/r/rpl_table_options.result b/mysql-test/suite/rpl/r/rpl_table_options.result
index 87fd8c2b2fb..14af4e390c2 100644
--- a/mysql-test/suite/rpl/r/rpl_table_options.result
+++ b/mysql-test/suite/rpl/r/rpl_table_options.result
@@ -5,24 +5,27 @@ set storage_engine=example;
connection slave;
connection master;
create table t1 (a int not null) ull=12340;
+alter table t1 ull=12350;
+Warnings:
+Note 1105 EXAMPLE DEBUG: ULL 12340 -> 12350
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
`a` int(11) NOT NULL
-) ENGINE=EXAMPLE DEFAULT CHARSET=latin1 `ull`=12340
+) ENGINE=EXAMPLE DEFAULT CHARSET=latin1 `ull`=12350
connection slave;
connection slave;
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
`a` int(11) NOT NULL
-) ENGINE=MyISAM DEFAULT CHARSET=latin1 /* `ull`=12340 */
+) ENGINE=MyISAM DEFAULT CHARSET=latin1 /* `ull`=12350 */
set sql_mode=ignore_bad_table_options;
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
`a` int(11) NOT NULL
-) ENGINE=MyISAM DEFAULT CHARSET=latin1 `ull`=12340
+) ENGINE=MyISAM DEFAULT CHARSET=latin1 `ull`=12350
connection master;
drop table t1;
set storage_engine=default;
diff --git a/mysql-test/suite/rpl/t/rpl_change_master.test b/mysql-test/suite/rpl/t/rpl_change_master.test
index 5e170d5acce..9ab49a585e6 100644
--- a/mysql-test/suite/rpl/t/rpl_change_master.test
+++ b/mysql-test/suite/rpl/t/rpl_change_master.test
@@ -101,4 +101,11 @@ connection master;
drop table t1;
}
+--connection master
+# MDEV-22741: *SAN: ERROR: AddressSanitizer: use-after-poison on address in
+# instrings/strmake.c:36 from change_master (on optimized builds)
+CHANGE MASTER TO MASTER_USER='root', MASTER_SSL=0, MASTER_SSL_CA='', MASTER_SSL_CERT='',
+ MASTER_SSL_KEY='', MASTER_SSL_CRL='', MASTER_SSL_CRLPATH='';
+CHANGE MASTER TO MASTER_USER='root', MASTER_PASSWORD='', MASTER_SSL=0;
+
--source include/rpl_end.inc
diff --git a/mysql-test/suite/rpl/t/rpl_ignore_grant.test b/mysql-test/suite/rpl/t/rpl_ignore_grant.test
index ee049a515d7..58457c14817 100644
--- a/mysql-test/suite/rpl/t/rpl_ignore_grant.test
+++ b/mysql-test/suite/rpl/t/rpl_ignore_grant.test
@@ -6,16 +6,12 @@ source include/master-slave.inc;
# do not be influenced by other tests.
connection master;
-set global sql_mode="";
-set local sql_mode="";
+set sql_mode="";
delete from mysql.user where user=_binary'rpl_ignore_grant';
delete from mysql.db where user=_binary'rpl_ignore_grant';
flush privileges;
-save_master_pos;
-connection slave;
-set global sql_mode="";
-set local sql_mode="";
-sync_with_master;
+--sync_slave_with_master
+set sql_mode="";
# as these DELETE were not replicated, we need to do them manually on the
# slave.
delete from mysql.user where user=_binary'rpl_ignore_grant';
@@ -27,9 +23,7 @@ connection master;
grant select on *.* to rpl_ignore_grant@localhost;
grant drop on test.* to rpl_ignore_grant@localhost;
show grants for rpl_ignore_grant@localhost;
-save_master_pos;
-connection slave;
-sync_with_master;
+--sync_slave_with_master
--error 1141 #("no such grant for user")
show grants for rpl_ignore_grant@localhost;
# check it another way
@@ -43,9 +37,7 @@ select count(*) from mysql.db where user=_binary'rpl_ignore_grant';
grant select on *.* to rpl_ignore_grant@localhost;
connection master;
set password for rpl_ignore_grant@localhost=password("does it work?");
-save_master_pos;
-connection slave;
-sync_with_master;
+--sync_slave_with_master
select password<>_binary'' from mysql.user where user=_binary'rpl_ignore_grant';
# clear what we have done, to not influence other tests.
@@ -53,15 +45,11 @@ connection master;
delete from mysql.user where user=_binary'rpl_ignore_grant';
delete from mysql.db where user=_binary'rpl_ignore_grant';
flush privileges;
-save_master_pos;
-connection slave;
-sync_with_master;
+--sync_slave_with_master
delete from mysql.user where user=_binary'rpl_ignore_grant';
delete from mysql.db where user=_binary'rpl_ignore_grant';
flush privileges;
-set global sql_mode=default;
connection master;
-set global sql_mode=default;
# End of 4.1 tests
--source include/rpl_end.inc
diff --git a/mysql-test/suite/rpl/t/rpl_ignore_table_update.test b/mysql-test/suite/rpl/t/rpl_ignore_table_update.test
index 840052e2f25..6591dbbc6ad 100644
--- a/mysql-test/suite/rpl/t/rpl_ignore_table_update.test
+++ b/mysql-test/suite/rpl/t/rpl_ignore_table_update.test
@@ -24,15 +24,11 @@ create table mysqltest_bar (m int);
insert into mysqltest_bar values(15);
create table t1 (k int);
insert into t1 values(55);
-save_master_pos;
-connection slave;
-sync_with_master;
+--sync_slave_with_master
select mysqltest_foo.n,mysqltest_bar.m,t1.k from mysqltest_foo,mysqltest_bar,t1;
connection master;
drop table mysqltest_foo,mysqltest_bar,t1;
-save_master_pos;
-connection slave;
-sync_with_master;
+--sync_slave_with_master
drop table mysqltest_foo,mysqltest_bar,t1;
# End of 4.1 tests
diff --git a/mysql-test/suite/rpl/t/rpl_master_pos_wait.test b/mysql-test/suite/rpl/t/rpl_master_pos_wait.test
index d8c8162ed9f..437d8412086 100644
--- a/mysql-test/suite/rpl/t/rpl_master_pos_wait.test
+++ b/mysql-test/suite/rpl/t/rpl_master_pos_wait.test
@@ -10,6 +10,7 @@ explain extended select master_pos_wait('master-bin.999999',0,2);
send select master_pos_wait('master-bin.999999',0);
connection slave1;
stop slave sql_thread;
+--source include/wait_for_slave_sql_to_stop.inc
connection slave;
reap;
diff --git a/mysql-test/suite/rpl/t/rpl_parallel_retry.test b/mysql-test/suite/rpl/t/rpl_parallel_retry.test
index 55da54e3c8c..97a3a709eb5 100644
--- a/mysql-test/suite/rpl/t/rpl_parallel_retry.test
+++ b/mysql-test/suite/rpl/t/rpl_parallel_retry.test
@@ -436,6 +436,9 @@ SET @@DEBUG_SYNC='now SIGNAL proceed_by_1000';
--connection spoiler_21
ROLLBACK;
+--let $wait_condition= SELECT count(*)=1 FROM information_schema.processlist WHERE state LIKE '%debug sync point%';
+--source include/wait_condition.inc
+
--echo # Release the 2nd worker to proceed
--connection spoiler_22
ROLLBACK;
diff --git a/mysql-test/suite/rpl/t/rpl_relay_max_extension.test b/mysql-test/suite/rpl/t/rpl_relay_max_extension.test
new file mode 100644
index 00000000000..e1e087f2e0e
--- /dev/null
+++ b/mysql-test/suite/rpl/t/rpl_relay_max_extension.test
@@ -0,0 +1,109 @@
+# ==== Purpose ====
+#
+# Test verifies that the auto-purging mechanism of relay logs works correctly
+# when the file extension grows beyond 999999.
+#
+# ==== Implementation ====
+#
+# Steps:
+# 0 - In a master-slave setup, clear all the relay logs on the slave server.
+# 1 - Start the slave so that new relay logs starting from
+# 'slave-relay-bin.000001' are created.
+# 2 - Get the active relay-log file name by using SHOW SLAVE STATUS.
+# Shut down the slave server.
+# 3 - Rename the active relay log to 'slave-relay-bin.999997' in both the
+# 'relay-log.info' and 'slave-relay-bin.index' files.
+# 4 - Restart the slave server configured with 'slave_parallel_threads=1'
+# and 'max_relay_log_size=100K'.
+# 5 - Generate load on the master such that a few relay logs are generated
+# on the slave. The relay log sequence number will grow to 7 digits.
+# 6 - Sync the slave with the master to ensure that the relay logs are
+# applied on the slave. They should have been automatically purged.
+# 7 - Assert that there is no 'slave-relay-bin.999999' entry left in
+# 'slave-relay-bin.index'.
+#
+# ==== References ====
+#
+# MDEV-8134: The relay-log is not flushed after the slave-relay-log.999999
+# showed
+#
+
+--source include/have_innodb.inc
+--source include/have_binlog_format_row.inc
+--let $rpl_topology=1->2
+--source include/rpl_init.inc
+
+--connection server_2
+--source include/stop_slave.inc
+RESET SLAVE;
+--source include/start_slave.inc
+--source include/stop_slave.inc
+--let $relay_log=query_get_value(SHOW SLAVE STATUS, Relay_Log_File, 1)
+
+--echo #
+--echo # Stop slave server
+--echo #
+
+--let $datadir = `select @@datadir`
+--exec echo "wait" > $MYSQLTEST_VARDIR/tmp/mysqld.2.expect
+--shutdown_server 10
+--source include/wait_until_disconnected.inc
+
+--exec sed -i "s/$relay_log/slave-relay-bin.999997/g" $datadir/relay-log.info
+--exec sed -i "s/$relay_log/slave-relay-bin.999997/g" $datadir/slave-relay-bin.index
+
+--echo #
+--echo # Simulate file number get close to 999997
+--echo # by renaming relay logs and modifying index/info files
+
+--move_file $datadir/$relay_log $datadir/slave-relay-bin.999997
+
+--echo #
+--echo # Restart slave server
+--echo #
+
+--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.2.expect
+--enable_reconnect
+--source include/wait_until_connected_again.inc
+SET @save_slave_parallel_threads= @@GLOBAL.slave_parallel_threads;
+SET @save_max_relay_log_size= @@GLOBAL.max_relay_log_size;
+
+SET GLOBAL slave_parallel_threads=1;
+SET GLOBAL max_relay_log_size=100 * 1024;
+--source include/start_slave.inc
+
+--connection server_1
+create table t1 (i int, c varchar(1024));
+--echo #
+--echo # Insert some data to generate enough amount of binary logs
+--echo #
+--let $count = 1000
+--disable_query_log
+while ($count)
+{
+ eval insert into t1 values (1001 - $count, repeat('a',1000));
+ dec $count;
+}
+--enable_query_log
+--save_master_pos
+
+--connection server_2
+--sync_with_master
+
+--let $relay_log=query_get_value(SHOW SLAVE STATUS, Relay_Log_File, 1)
+
+--echo #
+--echo # Assert that 'slave-relay-bin.999999' is purged.
+--echo #
+let SEARCH_FILE=$datadir/slave-relay-bin.index;
+let SEARCH_PATTERN=slave-relay-bin.999999;
+source include/search_pattern_in_file.inc;
+
+--source include/stop_slave.inc
+SET GLOBAL slave_parallel_threads= @save_slave_parallel_threads;
+SET GLOBAL max_relay_log_size= @save_max_relay_log_size;
+--source include/start_slave.inc
+
+--connection server_1
+DROP TABLE t1;
+--source include/rpl_end.inc
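A minimal sketch of the knobs the purpose block of rpl_relay_max_extension.test relies on, run against the slave from a plain client; it only restates variables and status columns that already appear in the test (max_relay_log_size, Relay_Log_File) and is illustrative rather than part of the fix:

    -- Force small relay logs so rotation happens frequently (the test uses ~100K).
    SET GLOBAL max_relay_log_size = 100 * 1024;
    SHOW GLOBAL VARIABLES LIKE 'max_relay_log_size';
    -- Relay_Log_File names the active relay log; once its extension reaches
    -- 999999 the next rotation rolls over to 7 digits, and the older files
    -- should still be purged automatically.
    SHOW SLAVE STATUS\G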
diff --git a/mysql-test/suite/rpl/t/rpl_row_USER.test b/mysql-test/suite/rpl/t/rpl_row_USER.test
index 31826812efa..405f609f0f6 100644
--- a/mysql-test/suite/rpl/t/rpl_row_USER.test
+++ b/mysql-test/suite/rpl/t/rpl_row_USER.test
@@ -11,7 +11,6 @@
# Includes
-- source include/have_binlog_format_row.inc
-- source include/master-slave.inc
-set global sql_mode="";
set local sql_mode="";
# Begin clean up test section
@@ -43,23 +42,18 @@ delimiter ;|
CALL mysqltest1.p1();
connection master;
SELECT * FROM mysqltest1.t1 ORDER BY a;
-sync_slave_with_master;
+--sync_slave_with_master
SELECT * FROM mysqltest1.t1 ORDER BY a;
connection master;
# Lets cleanup
-#show binlog events;
DROP DATABASE mysqltest1;
REVOKE ALL ON mysqltest1.* FROM 'tester'@'%';
REVOKE ALL ON mysqltest1.* FROM ''@'localhost%';
DROP USER tester@'%';
DROP USER ''@'localhost%';
-
-sync_slave_with_master;
-
+FLUSH PRIVILEGES;
+--sync_slave_with_master
# End of 5.0 test case
--source include/rpl_end.inc
-
-set global sql_mode=default;
-set local sql_mode=default;
diff --git a/mysql-test/suite/rpl/t/rpl_row_create_table.test b/mysql-test/suite/rpl/t/rpl_row_create_table.test
index 65f14295c19..cb76d6c4dcb 100644
--- a/mysql-test/suite/rpl/t/rpl_row_create_table.test
+++ b/mysql-test/suite/rpl/t/rpl_row_create_table.test
@@ -7,11 +7,6 @@ connection slave;
--source include/have_innodb.inc
connection master;
-# Bug#18326: Do not lock table for writing during prepare of statement
-# The use of the ps protocol causes extra table maps in the binlog, so
-# we disable the ps-protocol for this statement.
---disable_ps_protocol
-
# Set the default storage engine to different values on master and
# slave. We need to stop the slave for the server variable to take
# effect, since the variable is only read on start-up.
diff --git a/mysql-test/suite/rpl/t/rpl_row_flsh_tbls.test b/mysql-test/suite/rpl/t/rpl_row_flsh_tbls.test
index 2d5cde82dcc..c10b3570ed6 100644
--- a/mysql-test/suite/rpl/t/rpl_row_flsh_tbls.test
+++ b/mysql-test/suite/rpl/t/rpl_row_flsh_tbls.test
@@ -4,10 +4,4 @@
let $rename_event_pos= `select @binlog_start_pos + 819`;
-# Bug#18326: Do not lock table for writing during prepare of statement
-# The use of the ps protocol causes extra table maps in the binlog, so
-# we disable the ps-protocol for this statement.
-
---disable_ps_protocol
-- source include/rpl_flsh_tbls.test
---enable_ps_protocol
diff --git a/mysql-test/suite/rpl/t/rpl_row_tbl_metadata.test b/mysql-test/suite/rpl/t/rpl_row_tbl_metadata.test
index b0588cb2d58..d3a115e986a 100644
--- a/mysql-test/suite/rpl/t/rpl_row_tbl_metadata.test
+++ b/mysql-test/suite/rpl/t/rpl_row_tbl_metadata.test
@@ -329,7 +329,7 @@ while($ntables)
-- echo ### detect failure. Before the patch mysqlbinlog would find
-- echo ### a corrupted event, thence would fail.
-- let $MYSQLD_DATADIR= `SELECT @@datadir`
--- exec $MYSQL_BINLOG -v --hex $MYSQLD_DATADIR/master-bin.000001 > $MYSQLTEST_VARDIR/tmp/mysqlbinlog_bug50018.binlog
+-- exec $MYSQL_BINLOG -v --hexdump $MYSQLD_DATADIR/master-bin.000001 > $MYSQLTEST_VARDIR/tmp/mysqlbinlog_bug50018.binlog
## clean up
## For debugging purposes you might want not to remove these
diff --git a/mysql-test/suite/rpl/t/rpl_row_trig002.test b/mysql-test/suite/rpl/t/rpl_row_trig002.test
index 46f9ad91a3d..f04d1e6d478 100644
--- a/mysql-test/suite/rpl/t/rpl_row_trig002.test
+++ b/mysql-test/suite/rpl/t/rpl_row_trig002.test
@@ -48,9 +48,11 @@ INSERT INTO test.t3 VALUES ('Yes', 1, NULL, 'spamfilter','scan_incoming');
INSERT INTO test.t2 VALUES ('Yes', 1, NULL, 'spamfilter','scan_incoming');
select * from test.t2;
+--sorted_result
select * from test.t3;
sync_slave_with_master;
select * from test.t2;
+--sorted_result
select * from test.t3;
connection master;
diff --git a/mysql-test/suite/rpl/t/rpl_row_vcol_crash.test b/mysql-test/suite/rpl/t/rpl_row_vcol_crash.test
new file mode 100644
index 00000000000..84ee14977f3
--- /dev/null
+++ b/mysql-test/suite/rpl/t/rpl_row_vcol_crash.test
@@ -0,0 +1,425 @@
+# ==== Purpose ====
+#
+# Test verifies that, slave doesn't report any assert on UPDATE or DELETE of
+# row which tries to update the virtual columns with associated KEYs.
+#
+# Test scenarios are listed below.
+# 1) KEY on a virtual column with ON DELETE CASCADE
+# 2) Verify "ON DELETE CASCADE" for parent->child->child scenario
+# 3) KEY on a virtual column with ON UPDATE CASCADE
+# 4) Define triggers on master, their results should be replicated
+# as part of row events and they should be applied on slave with
+# the default slave_run_triggers_for_rbr=NO
+# 5) Define triggers + Foreign Keys on master, their results should be
+# replicated as part of row events and master and slave should be in sync.
+# 6) Triggers are present only on slave and 'slave_run_triggers_for_rbr=NO'
+# 7) Triggers are present only on slave and 'slave_run_triggers_for_rbr=YES'
+# 8) Triggers and Foreign Keys are present only on slave and
+# 'slave_run_triggers_for_rbr=NO'
+# 9) Triggers and Foreign Keys are present only on slave and
+# 'slave_run_triggers_for_rbr=YES'
+#
+# ==== References ====
+#
+# MDEV-23033: All slaves crash once in ~24 hours and loop restart with signal 11
+#
+
+--source include/have_binlog_format_row.inc
+--source include/have_innodb.inc
+--source include/master-slave.inc
+
+
+--echo #
+--echo # Test case 1: KEY on a virtual column with ON DELETE CASCADE
+--echo #
+CREATE TABLE t1 (id INT NOT NULL PRIMARY KEY) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1),(2),(3);
+
+CREATE TABLE t2 (id INT NOT NULL PRIMARY KEY,
+ t1_id INT NOT NULL,
+ v_col INT AS (t1_id+1) VIRTUAL, KEY (v_col), KEY (t1_id),
+ CONSTRAINT a FOREIGN KEY (t1_id) REFERENCES t1 (id) ON DELETE CASCADE
+) ENGINE=InnoDB;
+
+INSERT INTO t2 VALUES (90,1,NULL);
+INSERT INTO t2 VALUES (91,2,default);
+
+# Without the fix, the following query results in an assertion failure on the slave
+DELETE FROM t1 WHERE id=1;
+--sync_slave_with_master
+
+--echo #
+--echo # Verify data consistency on slave
+--echo #
+--let $diff_tables= master:test.t1, slave:test.t1
+--source include/diff_tables.inc
+--let $diff_tables= master:test.t2, slave:test.t2
+--source include/diff_tables.inc
+
+--connection master
+DROP TABLE t2,t1;
+--sync_slave_with_master
+
+--echo #
+--echo # Test case 2: Verify "ON DELETE CASCADE" for parent->child->child scenario
+--echo # Parent table: users
+--echo # Child tables: matchmaking_groups, matchmaking_group_users
+--echo # Parent table: matchmaking_groups
+--echo # Child tables: matchmaking_group_users, matchmaking_group_maps
+--echo #
+--echo # Deleting a row from parent table should be reflected in
+--echo # child tables.
+--echo # matchmaking_groups->matchmaking_group_users->matchmaking_group_maps
+--echo # users->matchmaking_group_users->matchmaking_group_maps
+--echo #
+
+--connection master
+CREATE TABLE users (id INT UNSIGNED AUTO_INCREMENT PRIMARY KEY,
+ name VARCHAR(32) NOT NULL DEFAULT ''
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+CREATE TABLE matchmaking_groups (
+ id BIGINT UNSIGNED AUTO_INCREMENT PRIMARY KEY,
+ host_user_id INT UNSIGNED NOT NULL UNIQUE,
+ v_col INT AS (host_user_id+1) VIRTUAL, KEY (v_col),
+ CONSTRAINT FOREIGN KEY (host_user_id) REFERENCES users (id)
+ ON DELETE CASCADE ON UPDATE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+CREATE TABLE matchmaking_group_users (
+ matchmaking_group_id BIGINT UNSIGNED NOT NULL,
+ user_id INT UNSIGNED NOT NULL,
+ v_col1 int as (user_id+1) virtual, KEY (v_col1),
+ PRIMARY KEY (matchmaking_group_id,user_id),
+ UNIQUE KEY user_id (user_id),
+ CONSTRAINT FOREIGN KEY (matchmaking_group_id)
+ REFERENCES matchmaking_groups (id) ON DELETE CASCADE ON UPDATE CASCADE,
+ CONSTRAINT FOREIGN KEY (user_id)
+ REFERENCES users (id) ON DELETE CASCADE ON UPDATE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+CREATE TABLE matchmaking_group_maps (
+ matchmaking_group_id BIGINT UNSIGNED NOT NULL,
+ map_id TINYINT UNSIGNED NOT NULL,
+ v_col2 INT AS (map_id+1) VIRTUAL, KEY (v_col2),
+ PRIMARY KEY (matchmaking_group_id,map_id),
+ CONSTRAINT FOREIGN KEY (matchmaking_group_id)
+ REFERENCES matchmaking_groups (id) ON DELETE CASCADE ON UPDATE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+--sync_slave_with_master
+
+--connection master
+INSERT INTO users VALUES (NULL,'foo'),(NULL,'bar');
+INSERT INTO matchmaking_groups VALUES (10,1,default),(11,2,default);
+INSERT INTO matchmaking_group_users VALUES (10,1,default),(11,2,default);
+INSERT INTO matchmaking_group_maps VALUES (10,55,default),(11,66,default);
+
+DELETE FROM matchmaking_groups WHERE id = 10;
+--sync_slave_with_master
+
+--echo #
+--echo # No rows should be returned as ON DELETE CASCASE should have removed
+--echo # corresponding rows from child tables. There should not any mismatch
+--echo # of 'id' field between parent->child.
+--echo #
+SELECT * FROM matchmaking_group_users WHERE matchmaking_group_id NOT IN (SELECT id FROM matchmaking_groups);
+SELECT * FROM matchmaking_group_maps WHERE matchmaking_group_id NOT IN (SELECT id FROM matchmaking_groups);
+
+--echo #
+--echo # Rows with id=11 should be present
+--echo #
+SELECT * FROM matchmaking_group_users;
+SELECT * FROM matchmaking_group_maps;
+
+--connection master
+DELETE FROM users WHERE id = 2;
+--sync_slave_with_master
+
+--echo #
+--echo # No rows should be present in both the child tables
+--echo #
+SELECT * FROM matchmaking_group_users;
+SELECT * FROM matchmaking_group_maps;
+
+--connection master
+DROP TABLE matchmaking_group_maps, matchmaking_group_users, matchmaking_groups, users;
+--sync_slave_with_master
+
+--echo #
+--echo # Test case 3: KEY on a virtual column with ON UPDATE CASCADE
+--echo #
+
+--connection master
+CREATE TABLE t1 (a INT NOT NULL PRIMARY KEY, b INT NOT NULL) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1, 80);
+
+CREATE TABLE t2 (a INT KEY, b INT,
+ v_col int as (b+1) virtual, KEY (v_col),
+ CONSTRAINT b FOREIGN KEY (b) REFERENCES t1(a) ON UPDATE CASCADE
+) ENGINE=InnoDB;
+INSERT INTO t2 VALUES (51, 1, default);
+--sync_slave_with_master
+
+--connection master
+UPDATE t1 SET a = 50 WHERE a = 1;
+
+--echo #
+--echo # Master: Verify that ON UPDATE CASCADE works fine
+--echo # old_row: (51, 1, 2) ON UPDATE New_row: (51, 50, 51)
+--echo #
+SELECT * FROM t2 WHERE b=50;
+--sync_slave_with_master
+
+--echo #
+--echo # Slave: Verify that ON UPDATE CASCADE works fine
+--echo # old_row: (51, 1, 2) ON UPDATE New_row: (51, 50, 51)
+--echo #
+SELECT * FROM t2 WHERE b=50;
+
+--connection master
+DROP TABLE t2, t1;
+--sync_slave_with_master
+
+--echo #
+--echo # Test case 4: Define triggers on master, their results should be
+--echo # replicated as part of row events and they should be
+--echo # applied on slave with the default
+--echo # slave_run_triggers_for_rbr=NO
+--echo #
+
+# In row-based replication, the binary log contains row changes. It holds
+# both the changes made by the statement itself and the changes made by the
+# triggers that the statement invoked. Slave servers therefore do not need to
+# run triggers for the row changes they are applying. Verify that this
+# behaviour holds: the data on the slave should look as if the trigger had
+# been executed. Note that by default slave_run_triggers_for_rbr=NO.
+
+--connection master
+CREATE TABLE t1 (id INT NOT NULL PRIMARY KEY) ENGINE=InnoDB;
+CREATE TABLE t2 (count INT NOT NULL) ENGINE=InnoDB;
+CREATE TRIGGER trg AFTER INSERT ON t1 FOR EACH ROW INSERT INTO t2 VALUES (1);
+INSERT INTO t1 VALUES (2),(3);
+--sync_slave_with_master
+
+SHOW GLOBAL VARIABLES LIKE 'slave_run_triggers_for_rbr';
+--echo #
+--echo # As two rows are inserted in table 't1', two rows should get inserted
+--echo # into table 't2' as part of trigger.
+--echo #
+--let $assert_cond= COUNT(*) = 2 FROM t2
+--let $assert_text= Table t2 should have two rows.
+--source include/assert.inc
+
+--connection master
+DROP TABLE t1,t2;
+--sync_slave_with_master
+
+--echo #
+--echo # Test case 5: Define triggers + Foreign Keys on master, their results
+--echo # should be replicated as part of row events and master
+--echo # and slave should be in sync.
+--echo #
+--connection master
+CREATE TABLE t1 (id INT NOT NULL PRIMARY KEY) ENGINE=InnoDB;
+CREATE TABLE t2 (t1_id INT NOT NULL,
+ v_col INT AS (t1_id+1) VIRTUAL, KEY (v_col), KEY (t1_id),
+ CONSTRAINT a FOREIGN KEY (t1_id) REFERENCES t1 (id) ON DELETE CASCADE
+) ENGINE=InnoDB;
+CREATE TABLE t3 (count INT NOT NULL) ENGINE=InnoDB;
+CREATE TRIGGER trg AFTER INSERT ON t1 FOR EACH ROW INSERT INTO t3 VALUES (1);
+
+INSERT INTO t1 VALUES (2),(3);
+INSERT INTO t2 VALUES (2, default), (3, default);
+--sync_slave_with_master
+
+--echo #
+--echo # As two rows are inserted in table 't1', two rows should get inserted
+--echo # into table 't3' as part of trigger.
+--echo #
+--let $assert_cond= COUNT(*) = 2 FROM t3
+--let $assert_text= Table t3 should have two rows.
+--source include/assert.inc
+
+--echo #
+--echo # Verify ON DELETE CASCASE correctness
+--echo #
+--connection master
+DELETE FROM t1 WHERE id=2;
+--sync_slave_with_master
+
+--connection master
+--let $diff_tables= master:test.t1, slave:test.t1
+--source include/diff_tables.inc
+--let $diff_tables= master:test.t2, slave:test.t2
+--source include/diff_tables.inc
+--let $diff_tables= master:test.t3, slave:test.t3
+--source include/diff_tables.inc
+
+DROP TABLE t3,t2,t1;
+--sync_slave_with_master
+
+#
+# Test case: Triggers only on slave
+#
+--write_file $MYSQLTEST_VARDIR/tmp/trig_on_slave.inc PROCEDURE
+ if ($slave_run_triggers_for_rbr == '') {
+ --die !!!ERROR IN TEST: you must set $slave_run_triggers_for_rbr
+ }
+
+--connection slave
+SET @save_slave_run_triggers_for_rbr= @@GLOBAL.slave_run_triggers_for_rbr;
+--eval SET GLOBAL slave_run_triggers_for_rbr= $slave_run_triggers_for_rbr;
+SHOW GLOBAL VARIABLES LIKE '%slave_run_triggers_for_rbr%';
+
+--connection master
+CREATE TABLE t1 (id INT NOT NULL PRIMARY KEY) ENGINE=InnoDB;
+CREATE TABLE t2 (t1_id INT NOT NULL,
+ v_col INT AS (t1_id+1) VIRTUAL, KEY (v_col),
+ KEY (t1_id), CONSTRAINT a FOREIGN KEY (t1_id) REFERENCES t1 (id) ON DELETE CASCADE
+) ENGINE=InnoDB;
+CREATE TABLE t3 (count INT NOT NULL) ENGINE=InnoDB;
+--sync_slave_with_master
+
+CREATE TRIGGER trg AFTER INSERT ON t2 FOR EACH ROW INSERT INTO t3 VALUES (1);
+
+--connection master
+INSERT INTO t1 VALUES (2),(3);
+INSERT INTO t2 VALUES (2, default), (3, default);
+--sync_slave_with_master
+
+if ($slave_run_triggers_for_rbr == 'NO') {
+--echo #
+--echo # Count must be 0
+--echo #
+--let $assert_cond= COUNT(*) = 0 FROM t3
+--let $assert_text= Table t3 should have zero rows.
+--source include/assert.inc
+}
+if ($slave_run_triggers_for_rbr == 'YES') {
+--echo #
+--echo # Count must be 2
+--echo #
+--let $assert_cond= COUNT(*) = 2 FROM t3
+--let $assert_text= Table t3 should have two rows.
+--source include/assert.inc
+}
+
+--connection master
+DELETE FROM t1 WHERE id=2;
+--sync_slave_with_master
+SET GLOBAL slave_run_triggers_for_rbr= @save_slave_run_triggers_for_rbr;
+
+--echo #
+--echo # Verify t1, t2 are consistent on slave.
+--echo #
+--let $diff_tables= master:test.t1, slave:test.t1
+--source include/diff_tables.inc
+--let $diff_tables= master:test.t2, slave:test.t2
+--source include/diff_tables.inc
+
+--connection master
+DROP TABLE t3,t2,t1;
+--sync_slave_with_master
+#END OF
+PROCEDURE
+
+--echo #
+--echo # Test case 6: Triggers are present only on slave and
+--echo # 'slave_run_triggers_for_rbr=NO'
+--echo #
+--let $slave_run_triggers_for_rbr=NO
+--source $MYSQLTEST_VARDIR/tmp/trig_on_slave.inc
+
+--echo #
+--echo # Test case 7: Triggers are present only on slave and
+--echo # 'slave_run_triggers_for_rbr=YES'
+--echo #
+--let $slave_run_triggers_for_rbr=YES
+--source $MYSQLTEST_VARDIR/tmp/trig_on_slave.inc
+--remove_file $MYSQLTEST_VARDIR/tmp/trig_on_slave.inc
+
+#
+# Test case: Trigger and Foreign Key are present only on slave
+#
+--write_file $MYSQLTEST_VARDIR/tmp/trig_fk_on_slave.inc PROCEDURE
+ if ($slave_run_triggers_for_rbr == '') {
+ --die !!!ERROR IN TEST: you must set $slave_run_triggers_for_rbr
+ }
+
+--connection slave
+SET @save_slave_run_triggers_for_rbr= @@GLOBAL.slave_run_triggers_for_rbr;
+--eval SET GLOBAL slave_run_triggers_for_rbr= $slave_run_triggers_for_rbr;
+SHOW GLOBAL VARIABLES LIKE '%slave_run_triggers_for_rbr%';
+
+--connection master
+CREATE TABLE t1 (id INT NOT NULL PRIMARY KEY) ENGINE=InnoDB;
+SET sql_log_bin=0;
+CREATE TABLE t2 (t1_id INT NOT NULL,v_col INT AS (t1_id+1) VIRTUAL) ENGINE=INNODB;
+SET sql_log_bin=1;
+CREATE TABLE t3 (count INT NOT NULL) ENGINE=InnoDB;
+--sync_slave_with_master
+
+# Have foreign key and trigger on slave.
+CREATE TABLE t2 (t1_id INT NOT NULL,
+ v_col INT AS (t1_id+1) VIRTUAL, KEY (v_col), KEY (t1_id),
+ CONSTRAINT a FOREIGN KEY (t1_id) REFERENCES t1 (id) ON DELETE CASCADE
+) ENGINE=InnoDB;
+CREATE TRIGGER trg AFTER INSERT ON t2 FOR EACH ROW INSERT INTO t3 VALUES (1);
+
+--connection master
+INSERT INTO t1 VALUES (2),(3);
+INSERT INTO t2 VALUES (2, default), (3, default);
+--sync_slave_with_master
+
+if ($slave_run_triggers_for_rbr == 'NO') {
+--echo #
+--echo # Count must be 0
+--echo #
+--let $assert_cond= COUNT(*) = 0 FROM t3
+--let $assert_text= Table t3 should have zero rows.
+--source include/assert.inc
+}
+if ($slave_run_triggers_for_rbr == 'YES') {
+--echo #
+--echo # Count must be 2
+--echo #
+--let $assert_cond= COUNT(*) = 2 FROM t3
+--let $assert_text= Table t3 should have two rows.
+--source include/assert.inc
+}
+
+--connection master
+DELETE FROM t1 WHERE id=2;
+--echo # t1: Should have one row
+SELECT * FROM t1;
+--echo # t2: Should have two rows
+SELECT * FROM t2;
+--sync_slave_with_master
+--echo # t1: Should have one row
+SELECT * FROM t1;
+--echo # t2: Should have one row on slave due to ON DELETE CASCASE
+SELECT * FROM t2;
+SET GLOBAL slave_run_triggers_for_rbr= @save_slave_run_triggers_for_rbr;
+
+--connection master
+DROP TABLE t3,t2,t1;
+--sync_slave_with_master
+#END OF
+PROCEDURE
+
+--echo #
+--echo # Test case 8: Triggers and Foreign Keys are present only on slave and
+--echo # 'slave_run_triggers_for_rbr=NO'
+--echo #
+--let $slave_run_triggers_for_rbr=NO
+--source $MYSQLTEST_VARDIR/tmp/trig_fk_on_slave.inc
+
+--echo #
+--echo # Test case 9: Triggers are Foreign Keys are present only on slave and
+--echo # 'slave_run_triggers_for_rbr=YES'
+--echo #
+--let $slave_run_triggers_for_rbr=YES
+--source $MYSQLTEST_VARDIR/tmp/trig_fk_on_slave.inc
+--remove_file $MYSQLTEST_VARDIR/tmp/trig_fk_on_slave.inc
+
+--source include/rpl_end.inc
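A condensed, hedged sketch of the pattern the new rpl_row_vcol_crash.test exercises; the identifiers below are illustrative rather than taken from the test. The essential shape is a child table with a KEY on a VIRTUAL column plus a cascading foreign key, so a cascaded DELETE applied from a row event on the slave must recompute the virtual column to maintain that index. The later test cases also rely on the default slave_run_triggers_for_rbr=NO, i.e. row events already carry the trigger-generated changes and the slave does not re-fire triggers unless that variable is set to YES.

    CREATE TABLE parent (id INT NOT NULL PRIMARY KEY) ENGINE=InnoDB;
    CREATE TABLE child (
      id INT NOT NULL PRIMARY KEY,
      parent_id INT NOT NULL,
      v_col INT AS (parent_id + 1) VIRTUAL,
      KEY (v_col), KEY (parent_id),
      CONSTRAINT fk_parent FOREIGN KEY (parent_id)
        REFERENCES parent (id) ON DELETE CASCADE
    ) ENGINE=InnoDB;
    INSERT INTO parent VALUES (1),(2);
    INSERT INTO child VALUES (10, 1, DEFAULT), (11, 2, DEFAULT);
    -- Replicated as a row event; on the slave the cascade also removes
    -- child row 10 and must update the secondary index on v_col.
    DELETE FROM parent WHERE id = 1;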
diff --git a/mysql-test/suite/rpl/t/rpl_semi_sync_slave_compressed_protocol-slave.opt b/mysql-test/suite/rpl/t/rpl_semi_sync_slave_compressed_protocol-slave.opt
new file mode 100644
index 00000000000..a1b687d691e
--- /dev/null
+++ b/mysql-test/suite/rpl/t/rpl_semi_sync_slave_compressed_protocol-slave.opt
@@ -0,0 +1 @@
+--slave_compressed_protocol
diff --git a/mysql-test/suite/rpl/t/rpl_semi_sync_slave_compressed_protocol.test b/mysql-test/suite/rpl/t/rpl_semi_sync_slave_compressed_protocol.test
new file mode 100644
index 00000000000..bc05bec2a96
--- /dev/null
+++ b/mysql-test/suite/rpl/t/rpl_semi_sync_slave_compressed_protocol.test
@@ -0,0 +1,55 @@
+################################################################################
+# Bug#26027024 SLAVE_COMPRESSED_PROTOCOL DOESN'T WORK WITH SEMI-SYNC
+# REPLICATION IN MYSQL-5.7
+#
+# Steps to reproduce:
+# 1) Set slave_compressed_protocol ON on Slave.
+# 2) Do some sample work on Master
+# 3) After the work is synced on Slave, check that there is no error
+# (Read semi-sync reply magic number error) on Slave.
+# 4) Cleanup
+################################################################################
+# Test is independent of Binlog format. One of the three formats is enough
+# for testing. Choosing 'Row' format.
+--source include/have_binlog_format_row.inc
+--source include/master-slave.inc
+
+--let $sav_enabled_master=`SELECT @@GLOBAL.rpl_semi_sync_master_enabled `
+SET @@GLOBAL.rpl_semi_sync_master_enabled = 1;
+
+--connection slave
+source include/stop_slave.inc;
+--let $sav_enabled_slave=`SELECT @@GLOBAL.rpl_semi_sync_slave_enabled `
+SET @@GLOBAL.rpl_semi_sync_slave_enabled = 1;
+source include/start_slave.inc;
+
+--connection master
+# Do some sample work on Master with slave_compressed_protocol ON.
+# (slave_compressed_protocol is set to ON in -slave.opt file of this test.)
+CREATE TABLE t1 (i INT);
+DROP TABLE t1;
+
+# Make sure the sync is done, so that the next 'assert' step can be executed
+# without any issues.
+--source include/rpl_sync.inc
+
+# Without the fix, the test would have generated a few
+# errors in the error log. With the fix, the test will
+# pass without any errors in the error log.
+--let $assert_text= Check that there is no 'Read semi-sync reply magic number error' in error log.
+--let $assert_select=Read semi-sync reply magic number error
+--let $assert_file= $MYSQLTEST_VARDIR/log/mysqld.1.err
+--let $assert_count= 0
+--let $assert_only_after = CURRENT_TEST:rpl.rpl_semi_sync_slave_compressed_protocol.test
+--source include/assert_grep.inc
+
+--connection master
+--evalp SET @@GLOBAL. rpl_semi_sync_master_enabled = $sav_enabled_master
+
+--connection slave
+source include/stop_slave.inc;
+--evalp SET @@GLOBAL. rpl_semi_sync_slave_enabled = $sav_enabled_slave
+source include/start_slave.inc;
+
+# Cleanup
+--source include/rpl_end.inc
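A minimal sketch of the configuration this test pins down, using only variables it already touches (rpl_semi_sync_master_enabled, rpl_semi_sync_slave_enabled; slave_compressed_protocol is supplied by the -slave.opt file); this is an illustration of the setup, not the test itself:

    -- on the master
    SET GLOBAL rpl_semi_sync_master_enabled = 1;
    SHOW STATUS LIKE 'Rpl_semi_sync_master_clients';

    -- on the slave (started with --slave_compressed_protocol via the .opt file)
    STOP SLAVE;
    SET GLOBAL rpl_semi_sync_slave_enabled = 1;
    START SLAVE;
    SHOW STATUS LIKE 'Rpl_semi_sync_slave_status';
    -- with the fix, the error log stays free of
    -- "Read semi-sync reply magic number error"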
diff --git a/mysql-test/suite/rpl/t/rpl_semisync_ali_issues.test b/mysql-test/suite/rpl/t/rpl_semisync_ali_issues.test
index 52cd9e31753..f67c6e2ac0a 100644
--- a/mysql-test/suite/rpl/t/rpl_semisync_ali_issues.test
+++ b/mysql-test/suite/rpl/t/rpl_semisync_ali_issues.test
@@ -31,8 +31,12 @@ echo [ enable semi-sync on slave ];
stop slave;
set global rpl_semi_sync_slave_enabled = 1;
start slave;
+let $status_var= rpl_semi_sync_slave_status;
+let $status_var_value= ON;
+source include/wait_for_status_var.inc;
show status like 'rpl_semi_sync_slave%';
+
connection master;
CREATE TABLE t1(a INT) ENGINE=InnoDB;
sync_slave_with_master;
@@ -190,6 +194,12 @@ connection con1;
INSERT INTO t1 values (2);
sync_slave_with_master;
connection con1;
+let $status_var= Rpl_semi_sync_master_clients;
+let $status_var_value= 1;
+source include/wait_for_status_var.inc;
+let $status_var= Rpl_semi_sync_master_status;
+let $status_var_value= ON;
+source include/wait_for_status_var.inc;
show status like 'Rpl_semi_sync_master_clients';
show status like 'Rpl_semi_sync_master_status';
@@ -259,7 +269,12 @@ START SLAVE IO_THREAD;
--echo #########################################################
connection con1;
SET GLOBAL rpl_semi_sync_master_enabled = 0;
+
+let $status_var= Rpl_semi_sync_master_clients;
+let $status_var_value= 1;
+source include/wait_for_status_var.inc;
show status like 'Rpl_semi_sync_master_clients';
+
INSERT INTO t1 VALUES (1);
SET GLOBAL rpl_semi_sync_master_enabled = 1;
INSERT INTO t1 VALUES (2);
diff --git a/mysql-test/suite/rpl/t/rpl_sp.test b/mysql-test/suite/rpl/t/rpl_sp.test
index fb30c5708bc..637dda47489 100644
--- a/mysql-test/suite/rpl/t/rpl_sp.test
+++ b/mysql-test/suite/rpl/t/rpl_sp.test
@@ -7,8 +7,6 @@ source include/have_binlog_format_mixed.inc;
source include/master-slave.inc;
set local sql_mode='';
-set global sql_mode='';
-
# we need a db != test, where we don't have automatic grants
--disable_warnings
drop database if exists mysqltest1;
@@ -199,9 +197,11 @@ delimiter ;|
delete t1,t2 from t1,t2;
select fn1(20);
insert into t2 values(fn1(21));
+--sorted_result
select * from t1;
select * from t2;
sync_slave_with_master;
+--sorted_result
select * from t1;
select * from t2;
@@ -728,4 +728,3 @@ drop procedure p1;
# Cleanup
sync_slave_with_master;
--source include/rpl_end.inc
-set global sql_mode=default;
diff --git a/mysql-test/suite/rpl/t/rpl_spec_variables-slave.opt b/mysql-test/suite/rpl/t/rpl_spec_variables-slave.opt
deleted file mode 100644
index 627becdbfb5..00000000000
--- a/mysql-test/suite/rpl/t/rpl_spec_variables-slave.opt
+++ /dev/null
@@ -1 +0,0 @@
---innodb
diff --git a/mysql-test/suite/rpl/t/rpl_spec_variables.test b/mysql-test/suite/rpl/t/rpl_spec_variables.test
index 2cb580fce83..e2d5175036c 100644
--- a/mysql-test/suite/rpl/t/rpl_spec_variables.test
+++ b/mysql-test/suite/rpl/t/rpl_spec_variables.test
@@ -260,10 +260,12 @@ DROP TABLE IF EXISTS t1,t2,t3;
--echo * sql_mode *
--connection master
+SET @old_sql_mode_master= @@global.sql_mode;
SET @@global.sql_mode=ANSI;
SET @@session.sql_mode=ANSI;
--connection slave
+SET @old_sql_mode_slave= @@global.sql_mode;
SET @@global.sql_mode=TRADITIONAL;
SET @@session.sql_mode=TRADITIONAL;
@@ -292,14 +294,16 @@ DROP TABLE t1;
SET @@global.character_set_database=@restore_master_character_set_database;
SET @@global.collation_server=@restore_master_collation_server;
SET @@global.storage_engine=@restore_master_storage_engine;
+SET @@global.sql_mode=@old_sql_mode_master;
--sync_slave_with_master
SET @@global.character_set_database=@restore_slave_character_set_database;
SET @@global.collation_server=@restore_slave_collation_server;
SET @@global.max_heap_table_size=@restore_slave_max_heap_table_size;
SET @@global.storage_engine=@restore_slave_storage_engine;
-
+SET @@global.sql_mode=@old_sql_mode_slave;
# Put at the end since the test otherwise emptied the table.
-
+remove_file $MYSQLTEST_VARDIR/tmp/words.dat;
+remove_file $MYSQLTEST_VARDIR/tmp/words2.dat;
--echo
call mtr.add_suppression("The table 't[12]' is full");
diff --git a/mysql-test/suite/rpl/t/rpl_table_options.test b/mysql-test/suite/rpl/t/rpl_table_options.test
index 3f52444a3c7..6dd1c9bd20d 100644
--- a/mysql-test/suite/rpl/t/rpl_table_options.test
+++ b/mysql-test/suite/rpl/t/rpl_table_options.test
@@ -18,6 +18,7 @@ connection master;
# the option is unknown.
#
create table t1 (a int not null) ull=12340;
+alter table t1 ull=12350;
show create table t1;
sync_slave_with_master;
diff --git a/mysql-test/suite/sql_sequence/concurrent_create.result b/mysql-test/suite/sql_sequence/concurrent_create.result
index 7e68195f7e0..2473abef37d 100644
--- a/mysql-test/suite/sql_sequence/concurrent_create.result
+++ b/mysql-test/suite/sql_sequence/concurrent_create.result
@@ -31,3 +31,16 @@ connection con1;
disconnect con1;
connection default;
DROP TABLE s1,s2;
+CREATE TABLE t1 (a INT) ENGINE=InnoDB;
+START TRANSACTION WITH CONSISTENT SNAPSHOT;
+connect con1,localhost,root,,test;
+CREATE SEQUENCE s1 ENGINE=InnoDB;
+FLUSH TABLES;
+disconnect con1;
+connection default;
+SELECT NEXTVAL(s1);
+NEXTVAL(s1)
+1
+COMMIT;
+DROP TABLE t1;
+DROP SEQUENCE s1;
diff --git a/mysql-test/suite/sql_sequence/concurrent_create.test b/mysql-test/suite/sql_sequence/concurrent_create.test
index d6a57ff7d50..b27a6d3bdb9 100644
--- a/mysql-test/suite/sql_sequence/concurrent_create.test
+++ b/mysql-test/suite/sql_sequence/concurrent_create.test
@@ -56,3 +56,22 @@ FLUSH TABLES;
--connection default
DROP TABLE s1,s2;
+
+#
+# MDEV-24545 Sequence created by one connection remains invisible to another
+#
+CREATE TABLE t1 (a INT) ENGINE=InnoDB;
+START TRANSACTION WITH CONSISTENT SNAPSHOT;
+
+--connect (con1,localhost,root,,test)
+CREATE SEQUENCE s1 ENGINE=InnoDB;
+FLUSH TABLES;
+--disconnect con1
+
+--connection default
+SELECT NEXTVAL(s1);
+COMMIT;
+
+# Cleanup
+DROP TABLE t1;
+DROP SEQUENCE s1;
diff --git a/mysql-test/suite/sql_sequence/create.result b/mysql-test/suite/sql_sequence/create.result
index 14464c60e99..5a53a66c9a8 100644
--- a/mysql-test/suite/sql_sequence/create.result
+++ b/mysql-test/suite/sql_sequence/create.result
@@ -666,7 +666,7 @@ create temporary sequence s;
drop temporary table s;
create temporary table s (i int);
drop temporary sequence s;
-ERROR 42S02: Unknown SEQUENCE: 'test.s'
+ERROR 42S02: 'test.s' is not a SEQUENCE
drop table s;
#
# MDEV-15115 Assertion failure in CREATE SEQUENCE...ROW_FORMAT=REDUNDANT
diff --git a/mysql-test/suite/sql_sequence/create.test b/mysql-test/suite/sql_sequence/create.test
index 2c41fb3658b..ac3aae845cd 100644
--- a/mysql-test/suite/sql_sequence/create.test
+++ b/mysql-test/suite/sql_sequence/create.test
@@ -489,7 +489,7 @@ drop table s;
create temporary sequence s;
drop temporary table s;
create temporary table s (i int);
---error ER_UNKNOWN_SEQUENCES
+--error ER_NOT_SEQUENCE2
drop temporary sequence s;
drop table s;
diff --git a/mysql-test/suite/sql_sequence/mysqldump.result b/mysql-test/suite/sql_sequence/mysqldump.result
index e6aedb57ea6..fb023cc5e36 100644
--- a/mysql-test/suite/sql_sequence/mysqldump.result
+++ b/mysql-test/suite/sql_sequence/mysqldump.result
@@ -2,8 +2,46 @@ CREATE SEQUENCE a1 engine=aria;
CREATE TABLE t1(a INT, KEY (a)) KEY_BLOCK_SIZE=1024;
insert into t1 values (1),(2);
CREATE SEQUENCE x1 engine=innodb;
+# dump whole database
CREATE SEQUENCE `a1` start with 1 minvalue 1 maxvalue 9223372036854775806 increment by 1 cache 1000 nocycle ENGINE=Aria;
SELECT SETVAL(`a1`, 1, 0);
+CREATE SEQUENCE `x1` start with 1 minvalue 1 maxvalue 9223372036854775806 increment by 1 cache 1000 nocycle ENGINE=InnoDB;
+SELECT SETVAL(`x1`, 1, 0);
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ KEY `a` (`a`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1 KEY_BLOCK_SIZE=1024;
+/*!40101 SET character_set_client = @saved_cs_client */;
+INSERT INTO `t1` VALUES (1),(2);
+# dump by tables order 1
+CREATE SEQUENCE `a1` start with 1 minvalue 1 maxvalue 9223372036854775806 increment by 1 cache 1000 nocycle ENGINE=Aria;
+SELECT SETVAL(`a1`, 1, 0);
+CREATE SEQUENCE `x1` start with 1 minvalue 1 maxvalue 9223372036854775806 increment by 1 cache 1000 nocycle ENGINE=InnoDB;
+SELECT SETVAL(`x1`, 1, 0);
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ KEY `a` (`a`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1 KEY_BLOCK_SIZE=1024;
+/*!40101 SET character_set_client = @saved_cs_client */;
+INSERT INTO `t1` VALUES (1),(2);
+# dump by tables order 2
+CREATE SEQUENCE `a1` start with 1 minvalue 1 maxvalue 9223372036854775806 increment by 1 cache 1000 nocycle ENGINE=Aria;
+SELECT SETVAL(`a1`, 1, 0);
+CREATE SEQUENCE `x1` start with 1 minvalue 1 maxvalue 9223372036854775806 increment by 1 cache 1000 nocycle ENGINE=InnoDB;
+SELECT SETVAL(`x1`, 1, 0);
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ KEY `a` (`a`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1 KEY_BLOCK_SIZE=1024;
+/*!40101 SET character_set_client = @saved_cs_client */;
+INSERT INTO `t1` VALUES (1),(2);
+# dump by tables only tables
/*!40101 SET @saved_cs_client = @@character_set_client */;
/*!40101 SET character_set_client = utf8 */;
CREATE TABLE `t1` (
@@ -12,8 +50,12 @@ CREATE TABLE `t1` (
) ENGINE=MyISAM DEFAULT CHARSET=latin1 KEY_BLOCK_SIZE=1024;
/*!40101 SET character_set_client = @saved_cs_client */;
INSERT INTO `t1` VALUES (1),(2);
+# dump by tables only sequences
+CREATE SEQUENCE `a1` start with 1 minvalue 1 maxvalue 9223372036854775806 increment by 1 cache 1000 nocycle ENGINE=Aria;
+SELECT SETVAL(`a1`, 1, 0);
CREATE SEQUENCE `x1` start with 1 minvalue 1 maxvalue 9223372036854775806 increment by 1 cache 1000 nocycle ENGINE=InnoDB;
SELECT SETVAL(`x1`, 1, 0);
+# end of dumps
DROP TABLE a1,t1,x1;
set default_storage_engine=InnoDB;
create sequence t1;
diff --git a/mysql-test/suite/sql_sequence/mysqldump.test b/mysql-test/suite/sql_sequence/mysqldump.test
index 308f06d5e8d..d2afb2fd675 100644
--- a/mysql-test/suite/sql_sequence/mysqldump.test
+++ b/mysql-test/suite/sql_sequence/mysqldump.test
@@ -11,7 +11,18 @@ CREATE SEQUENCE a1 engine=aria;
CREATE TABLE t1(a INT, KEY (a)) KEY_BLOCK_SIZE=1024;
insert into t1 values (1),(2);
CREATE SEQUENCE x1 engine=innodb;
+--echo # dump whole database
--exec $MYSQL_DUMP --compact test
+--echo # dump by tables order 1
+--exec $MYSQL_DUMP --compact --tables test t1 a1 x1
+--echo # dump by tables order 2
+--exec $MYSQL_DUMP --compact --tables test a1 t1 x1
+--echo # dump by tables only tables
+--exec $MYSQL_DUMP --compact --tables test t1
+--echo # dump by tables only sequences
+--exec $MYSQL_DUMP --compact --tables test a1 x1
+--echo # end of dumps
+
DROP TABLE a1,t1,x1;
#
diff --git a/mysql-test/suite/sql_sequence/other.result b/mysql-test/suite/sql_sequence/other.result
index abc101b3c00..643233149d2 100644
--- a/mysql-test/suite/sql_sequence/other.result
+++ b/mysql-test/suite/sql_sequence/other.result
@@ -300,4 +300,63 @@ update t1 set p_first_name='Yunxi' where p_id=1;
drop view v2;
drop table t1,t2;
drop sequence s1;
+#
+# MDEV-19273:Server crash in MDL_ticket::has_stronger_or_equal_type or
+# Assertion `thd->mdl_context.is_lock_owner(MDL_key::TABLE,
+# table->db.str, table->table_name.str, MDL_SHARED)' failed
+# in mysql_rm_table_no_locks
+#
+CREATE TABLE t1 (a INT);
+CREATE TEMPORARY TABLE tmp (b INT);
+LOCK TABLE t1 READ;
+DROP SEQUENCE tmp;
+ERROR 42S02: 'test.tmp' is not a SEQUENCE
+DROP TEMPORARY SEQUENCE tmp;
+ERROR 42S02: 'test.tmp' is not a SEQUENCE
+DROP SEQUENCE t1;
+ERROR HY000: Table 't1' was locked with a READ lock and can't be updated
+DROP TEMPORARY SEQUENCE t1;
+ERROR 42S02: Unknown SEQUENCE: 'test.t1'
+UNLOCK TABLES;
+DROP SEQUENCE t1;
+ERROR 42S02: 'test.t1' is not a SEQUENCE
+DROP TEMPORARY SEQUENCE t1;
+ERROR 42S02: Unknown SEQUENCE: 'test.t1'
+DROP TABLE t1;
+CREATE TABLE t (a INT);
+CREATE TEMPORARY TABLE s (f INT);
+CREATE SEQUENCE s;
+LOCK TABLE t WRITE;
+DROP SEQUENCE s;
+ERROR 42S02: 'test.s' is not a SEQUENCE
+DROP TEMPORARY SEQUENCE s;
+ERROR 42S02: 'test.s' is not a SEQUENCE
+UNLOCK TABLES;
+CREATE TEMPORARY SEQUENCE s;
+LOCK TABLE t WRITE;
+DROP TEMPORARY SEQUENCE s;
+UNLOCK TABLES;
+DROP TEMPORARY TABLE s;
+DROP SEQUENCE s;
+create table s(a INT);
+CREATE TEMPORARY TABLE s (f INT);
+LOCK TABLE t WRITE;
+DROP TEMPORARY TABLE s;
+CREATE TEMPORARY TABLE s (f INT);
+DROP TABLE s;
+DROP TABLE s;
+ERROR HY000: Table 's' was not locked with LOCK TABLES
+UNLOCK TABLES;
+DROP TABLE s;
+CREATE VIEW v1 as SELECT * FROM t;
+CREATE SEQUENCE s;
+DROP SEQUENCE IF EXISTS v1;
+Warnings:
+Note 4091 Unknown SEQUENCE: 'test.v1'
+DROP VIEW IF EXISTS s;
+Warnings:
+Note 4092 Unknown VIEW: 'test.s'
+DROP VIEW v1;
+DROP SEQUENCE s;
+DROP TABLE t;
# End of 10.3 tests
diff --git a/mysql-test/suite/sql_sequence/other.test b/mysql-test/suite/sql_sequence/other.test
index 70c4efa40e5..0fbb2d0e2f9 100644
--- a/mysql-test/suite/sql_sequence/other.test
+++ b/mysql-test/suite/sql_sequence/other.test
@@ -315,4 +315,68 @@ drop view v2;
drop table t1,t2;
drop sequence s1;
+--echo #
+--echo # MDEV-19273:Server crash in MDL_ticket::has_stronger_or_equal_type or
+--echo # Assertion `thd->mdl_context.is_lock_owner(MDL_key::TABLE,
+--echo # table->db.str, table->table_name.str, MDL_SHARED)' failed
+--echo # in mysql_rm_table_no_locks
+--echo #
+
+CREATE TABLE t1 (a INT);
+CREATE TEMPORARY TABLE tmp (b INT);
+LOCK TABLE t1 READ;
+--error ER_NOT_SEQUENCE2
+DROP SEQUENCE tmp;
+--error ER_NOT_SEQUENCE2
+DROP TEMPORARY SEQUENCE tmp;
+--error ER_TABLE_NOT_LOCKED_FOR_WRITE
+DROP SEQUENCE t1;
+--error ER_UNKNOWN_SEQUENCES
+DROP TEMPORARY SEQUENCE t1;
+UNLOCK TABLES;
+--error ER_NOT_SEQUENCE2
+DROP SEQUENCE t1;
+--error ER_UNKNOWN_SEQUENCES
+DROP TEMPORARY SEQUENCE t1;
+
+# Cleanup
+DROP TABLE t1;
+
+
+CREATE TABLE t (a INT);
+CREATE TEMPORARY TABLE s (f INT);
+CREATE SEQUENCE s;
+LOCK TABLE t WRITE;
+--error ER_NOT_SEQUENCE2
+DROP SEQUENCE s;
+--error ER_NOT_SEQUENCE2
+DROP TEMPORARY SEQUENCE s;
+UNLOCK TABLES;
+CREATE TEMPORARY SEQUENCE s;
+LOCK TABLE t WRITE;
+DROP TEMPORARY SEQUENCE s;
+UNLOCK TABLES;
+DROP TEMPORARY TABLE s;
+DROP SEQUENCE s;
+
+create table s(a INT);
+CREATE TEMPORARY TABLE s (f INT);
+LOCK TABLE t WRITE;
+DROP TEMPORARY TABLE s;
+CREATE TEMPORARY TABLE s (f INT);
+DROP TABLE s;
+--error ER_TABLE_NOT_LOCKED
+DROP TABLE s;
+UNLOCK TABLES;
+DROP TABLE s;
+
+CREATE VIEW v1 as SELECT * FROM t;
+CREATE SEQUENCE s;
+
+DROP SEQUENCE IF EXISTS v1;
+DROP VIEW IF EXISTS s;
+
+DROP VIEW v1;
+DROP SEQUENCE s;
+DROP TABLE t;
--echo # End of 10.3 tests
diff --git a/mysql-test/suite/sys_vars/inc/sysvars_server.inc b/mysql-test/suite/sys_vars/inc/sysvars_server.inc
index 36b41cbdc09..025f8a8922d 100644
--- a/mysql-test/suite/sys_vars/inc/sysvars_server.inc
+++ b/mysql-test/suite/sys_vars/inc/sysvars_server.inc
@@ -23,7 +23,7 @@ select VARIABLE_NAME,VARIABLE_SCOPE,VARIABLE_TYPE,VARIABLE_COMMENT,NUMERIC_MIN_V
variable_name not like 'wsrep%' and
variable_name not like 's3%' and
variable_name not in (
- 'log_tc_size'
+ 'log_tc_size','have_sanitizer'
)
order by variable_name;
diff --git a/mysql-test/suite/sys_vars/r/innodb_checksum_algorithm_basic.result b/mysql-test/suite/sys_vars/r/innodb_checksum_algorithm_basic.result
index eec16411144..5a9c201f494 100644
--- a/mysql-test/suite/sys_vars/r/innodb_checksum_algorithm_basic.result
+++ b/mysql-test/suite/sys_vars/r/innodb_checksum_algorithm_basic.result
@@ -11,18 +11,26 @@ SELECT @@global.innodb_checksum_algorithm;
@@global.innodb_checksum_algorithm
strict_crc32
SET GLOBAL innodb_checksum_algorithm = 'innodb';
+Warnings:
+Warning 138 Setting innodb_checksum_algorithm to values other than crc32, full_crc32, strict_crc32 or strict_full_crc32 is UNSAFE and DEPRECATED. These deprecated values will be disallowed in MariaDB 10.6.
SELECT @@global.innodb_checksum_algorithm;
@@global.innodb_checksum_algorithm
innodb
SET GLOBAL innodb_checksum_algorithm = 'strict_innodb';
+Warnings:
+Warning 138 Setting innodb_checksum_algorithm to values other than crc32, full_crc32, strict_crc32 or strict_full_crc32 is UNSAFE and DEPRECATED. These deprecated values will be disallowed in MariaDB 10.6.
SELECT @@global.innodb_checksum_algorithm;
@@global.innodb_checksum_algorithm
strict_innodb
SET GLOBAL innodb_checksum_algorithm = 'none';
+Warnings:
+Warning 138 Setting innodb_checksum_algorithm to values other than crc32, full_crc32, strict_crc32 or strict_full_crc32 is UNSAFE and DEPRECATED. These deprecated values will be disallowed in MariaDB 10.6.
SELECT @@global.innodb_checksum_algorithm;
@@global.innodb_checksum_algorithm
none
SET GLOBAL innodb_checksum_algorithm = 'strict_none';
+Warnings:
+Warning 138 Setting innodb_checksum_algorithm to values other than crc32, full_crc32, strict_crc32 or strict_full_crc32 is UNSAFE and DEPRECATED. These deprecated values will be disallowed in MariaDB 10.6.
SELECT @@global.innodb_checksum_algorithm;
@@global.innodb_checksum_algorithm
strict_none
diff --git a/mysql-test/suite/sys_vars/r/innodb_idle_flush_pct_basic.result b/mysql-test/suite/sys_vars/r/innodb_idle_flush_pct_basic.result
index a2c328f38fd..915343fcff2 100644
--- a/mysql-test/suite/sys_vars/r/innodb_idle_flush_pct_basic.result
+++ b/mysql-test/suite/sys_vars/r/innodb_idle_flush_pct_basic.result
@@ -24,6 +24,8 @@ select * from information_schema.session_variables where variable_name='innodb_i
VARIABLE_NAME VARIABLE_VALUE
INNODB_IDLE_FLUSH_PCT 100
set global innodb_idle_flush_pct=10;
+Warnings:
+Warning 131 innodb_idle_flush_pct is DEPRECATED and has no effect.
select @@global.innodb_idle_flush_pct;
@@global.innodb_idle_flush_pct
10
@@ -44,6 +46,7 @@ ERROR 42000: Incorrect argument type to variable 'innodb_idle_flush_pct'
set global innodb_idle_flush_pct=-7;
Warnings:
Warning 1292 Truncated incorrect innodb_idle_flush_pct value: '-7'
+Warning 131 innodb_idle_flush_pct is DEPRECATED and has no effect.
select @@global.innodb_idle_flush_pct;
@@global.innodb_idle_flush_pct
0
@@ -53,6 +56,7 @@ INNODB_IDLE_FLUSH_PCT 0
set global innodb_idle_flush_pct=106;
Warnings:
Warning 1292 Truncated incorrect innodb_idle_flush_pct value: '106'
+Warning 131 innodb_idle_flush_pct is DEPRECATED and has no effect.
select @@global.innodb_idle_flush_pct;
@@global.innodb_idle_flush_pct
100
@@ -60,18 +64,26 @@ select * from information_schema.global_variables where variable_name='innodb_id
VARIABLE_NAME VARIABLE_VALUE
INNODB_IDLE_FLUSH_PCT 100
set global innodb_idle_flush_pct=0;
+Warnings:
+Warning 131 innodb_idle_flush_pct is DEPRECATED and has no effect.
select @@global.innodb_idle_flush_pct;
@@global.innodb_idle_flush_pct
0
set global innodb_idle_flush_pct=100;
+Warnings:
+Warning 131 innodb_idle_flush_pct is DEPRECATED and has no effect.
select @@global.innodb_idle_flush_pct;
@@global.innodb_idle_flush_pct
100
set global innodb_idle_flush_pct=DEFAULT;
+Warnings:
+Warning 131 innodb_idle_flush_pct is DEPRECATED and has no effect.
select @@global.innodb_idle_flush_pct;
@@global.innodb_idle_flush_pct
100
SET @@global.innodb_idle_flush_pct = @start_global_value;
+Warnings:
+Warning 131 innodb_idle_flush_pct is DEPRECATED and has no effect.
SELECT @@global.innodb_idle_flush_pct;
@@global.innodb_idle_flush_pct
100
diff --git a/mysql-test/suite/sys_vars/r/innodb_simulate_comp_failures_basic.result b/mysql-test/suite/sys_vars/r/innodb_simulate_comp_failures_basic.result
deleted file mode 100644
index 7a6c9ca2db6..00000000000
--- a/mysql-test/suite/sys_vars/r/innodb_simulate_comp_failures_basic.result
+++ /dev/null
@@ -1,77 +0,0 @@
-SET @start_global_value = @@global.innodb_simulate_comp_failures;
-SELECT @start_global_value;
-@start_global_value
-0
-Valid values are between 0 and 99
-select @@global.innodb_simulate_comp_failures between 0 and 99;
-@@global.innodb_simulate_comp_failures between 0 and 99
-1
-select @@global.innodb_simulate_comp_failures;
-@@global.innodb_simulate_comp_failures
-0
-select @@session.innodb_simulate_comp_failures;
-ERROR HY000: Variable 'innodb_simulate_comp_failures' is a GLOBAL variable
-show global variables like 'innodb_simulate_comp_failures';
-Variable_name Value
-innodb_simulate_comp_failures 0
-show session variables like 'innodb_simulate_comp_failures';
-Variable_name Value
-innodb_simulate_comp_failures 0
-select * from information_schema.global_variables where variable_name='innodb_simulate_comp_failures';
-VARIABLE_NAME VARIABLE_VALUE
-INNODB_SIMULATE_COMP_FAILURES 0
-select * from information_schema.session_variables where variable_name='innodb_simulate_comp_failures';
-VARIABLE_NAME VARIABLE_VALUE
-INNODB_SIMULATE_COMP_FAILURES 0
-set global innodb_simulate_comp_failures=10;
-select @@global.innodb_simulate_comp_failures;
-@@global.innodb_simulate_comp_failures
-10
-select * from information_schema.global_variables where variable_name='innodb_simulate_comp_failures';
-VARIABLE_NAME VARIABLE_VALUE
-INNODB_SIMULATE_COMP_FAILURES 10
-select * from information_schema.session_variables where variable_name='innodb_simulate_comp_failures';
-VARIABLE_NAME VARIABLE_VALUE
-INNODB_SIMULATE_COMP_FAILURES 10
-set session innodb_simulate_comp_failures=1;
-ERROR HY000: Variable 'innodb_simulate_comp_failures' is a GLOBAL variable and should be set with SET GLOBAL
-set global innodb_simulate_comp_failures=1.1;
-ERROR 42000: Incorrect argument type to variable 'innodb_simulate_comp_failures'
-set global innodb_simulate_comp_failures=1e1;
-ERROR 42000: Incorrect argument type to variable 'innodb_simulate_comp_failures'
-set global innodb_simulate_comp_failures="foo";
-ERROR 42000: Incorrect argument type to variable 'innodb_simulate_comp_failures'
-set global innodb_simulate_comp_failures=-7;
-Warnings:
-Warning 1292 Truncated incorrect innodb_simulate_comp_failures value: '-7'
-select @@global.innodb_simulate_comp_failures;
-@@global.innodb_simulate_comp_failures
-0
-select * from information_schema.global_variables where variable_name='innodb_simulate_comp_failures';
-VARIABLE_NAME VARIABLE_VALUE
-INNODB_SIMULATE_COMP_FAILURES 0
-set global innodb_simulate_comp_failures=106;
-Warnings:
-Warning 1292 Truncated incorrect innodb_simulate_comp_failures value: '106'
-select @@global.innodb_simulate_comp_failures;
-@@global.innodb_simulate_comp_failures
-99
-select * from information_schema.global_variables where variable_name='innodb_simulate_comp_failures';
-VARIABLE_NAME VARIABLE_VALUE
-INNODB_SIMULATE_COMP_FAILURES 99
-set global innodb_simulate_comp_failures=0;
-select @@global.innodb_simulate_comp_failures;
-@@global.innodb_simulate_comp_failures
-0
-set global innodb_simulate_comp_failures=99;
-select @@global.innodb_simulate_comp_failures;
-@@global.innodb_simulate_comp_failures
-99
-set global innodb_simulate_comp_failures=DEFAULT;
-select @@global.innodb_simulate_comp_failures;
-@@global.innodb_simulate_comp_failures
-0
-SET @@global.innodb_simulate_comp_failures = @start_global_value;
-SELECT @@global.innodb_simulate_comp_failures;
-@@global.innodb_simulate_comp_failures
-0
diff --git a/mysql-test/suite/sys_vars/r/max_sort_length_basic.result b/mysql-test/suite/sys_vars/r/max_sort_length_basic.result
deleted file mode 100644
index b48b045897c..00000000000
--- a/mysql-test/suite/sys_vars/r/max_sort_length_basic.result
+++ /dev/null
@@ -1,199 +0,0 @@
-SET @start_global_value = @@global.max_sort_length;
-SELECT @start_global_value;
-@start_global_value
-1024
-SET @start_session_value = @@session.max_sort_length;
-SELECT @start_session_value;
-@start_session_value
-1024
-'#--------------------FN_DYNVARS_084_01-------------------------#'
-SET @@global.max_sort_length = 1000;
-SET @@global.max_sort_length = DEFAULT;
-SELECT @@global.max_sort_length;
-@@global.max_sort_length
-1024
-SET @@session.max_sort_length = 1000;
-SET @@session.max_sort_length = DEFAULT;
-SELECT @@session.max_sort_length;
-@@session.max_sort_length
-1024
-'#--------------------FN_DYNVARS_084_02-------------------------#'
-SET @@global.max_sort_length = DEFAULT;
-SELECT @@global.max_sort_length = 1024;
-@@global.max_sort_length = 1024
-1
-SET @@session.max_sort_length = DEFAULT;
-SELECT @@session.max_sort_length = 1024;
-@@session.max_sort_length = 1024
-1
-'#--------------------FN_DYNVARS_084_03-------------------------#'
-SET @@global.max_sort_length = 8;
-SELECT @@global.max_sort_length;
-@@global.max_sort_length
-8
-SET @@global.max_sort_length = 9;
-SELECT @@global.max_sort_length;
-@@global.max_sort_length
-9
-SET @@global.max_sort_length = 8388608;
-SELECT @@global.max_sort_length;
-@@global.max_sort_length
-8388608
-SET @@global.max_sort_length = 8388607;
-SELECT @@global.max_sort_length;
-@@global.max_sort_length
-8388607
-SET @@global.max_sort_length = 65536;
-SELECT @@global.max_sort_length;
-@@global.max_sort_length
-65536
-'#--------------------FN_DYNVARS_084_04-------------------------#'
-SET @@session.max_sort_length = 8;
-SELECT @@session.max_sort_length;
-@@session.max_sort_length
-8
-SET @@session.max_sort_length = 9;
-SELECT @@session.max_sort_length;
-@@session.max_sort_length
-9
-SET @@session.max_sort_length = 8388608;
-SELECT @@session.max_sort_length;
-@@session.max_sort_length
-8388608
-SET @@session.max_sort_length = 8388607;
-SELECT @@session.max_sort_length;
-@@session.max_sort_length
-8388607
-SET @@session.max_sort_length = 65536;
-SELECT @@session.max_sort_length;
-@@session.max_sort_length
-65536
-'#------------------FN_DYNVARS_084_05-----------------------#'
-SET @@global.max_sort_length = -1024;
-Warnings:
-Warning 1292 Truncated incorrect max_sort_length value: '-1024'
-SELECT @@global.max_sort_length;
-@@global.max_sort_length
-8
-SET @@global.max_sort_length = 3;
-Warnings:
-Warning 1292 Truncated incorrect max_sort_length value: '3'
-SELECT @@global.max_sort_length;
-@@global.max_sort_length
-8
-SET @@global.max_sort_length = 8388609;
-Warnings:
-Warning 1292 Truncated incorrect max_sort_length value: '8388609'
-SELECT @@global.max_sort_length;
-@@global.max_sort_length
-8388608
-SET @@global.max_sort_length = 0;
-Warnings:
-Warning 1292 Truncated incorrect max_sort_length value: '0'
-SELECT @@global.max_sort_length;
-@@global.max_sort_length
-8
-SET @@global.max_sort_length = 65530.34;
-ERROR 42000: Incorrect argument type to variable 'max_sort_length'
-SELECT @@global.max_sort_length;
-@@global.max_sort_length
-8
-SET @@global.max_sort_length = test;
-ERROR 42000: Incorrect argument type to variable 'max_sort_length'
-SELECT @@global.max_sort_length;
-@@global.max_sort_length
-8
-SET @@session.max_sort_length = 8388610;
-Warnings:
-Warning 1292 Truncated incorrect max_sort_length value: '8388610'
-SELECT @@session.max_sort_length;
-@@session.max_sort_length
-8388608
-SET @@session.max_sort_length = -1;
-Warnings:
-Warning 1292 Truncated incorrect max_sort_length value: '-1'
-SELECT @@session.max_sort_length;
-@@session.max_sort_length
-8
-SET @@session.max_sort_length = 3;
-Warnings:
-Warning 1292 Truncated incorrect max_sort_length value: '3'
-SELECT @@session.max_sort_length;
-@@session.max_sort_length
-8
-SET @@session.max_sort_length = 0;
-Warnings:
-Warning 1292 Truncated incorrect max_sort_length value: '0'
-SELECT @@session.max_sort_length;
-@@session.max_sort_length
-8
-SET @@session.max_sort_length = 65530.34;
-ERROR 42000: Incorrect argument type to variable 'max_sort_length'
-SET @@session.max_sort_length = 10737418241;
-Warnings:
-Warning 1292 Truncated incorrect max_sort_length value: '10737418241'
-SELECT @@session.max_sort_length;
-@@session.max_sort_length
-8388608
-SET @@session.max_sort_length = test;
-ERROR 42000: Incorrect argument type to variable 'max_sort_length'
-SELECT @@session.max_sort_length;
-@@session.max_sort_length
-8388608
-'#------------------FN_DYNVARS_084_06-----------------------#'
-SELECT @@global.max_sort_length = VARIABLE_VALUE
-FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES
-WHERE VARIABLE_NAME='max_sort_length';
-@@global.max_sort_length = VARIABLE_VALUE
-1
-'#------------------FN_DYNVARS_084_07-----------------------#'
-SELECT @@session.max_sort_length = VARIABLE_VALUE
-FROM INFORMATION_SCHEMA.SESSION_VARIABLES
-WHERE VARIABLE_NAME='max_sort_length';
-@@session.max_sort_length = VARIABLE_VALUE
-1
-'#------------------FN_DYNVARS_084_08-----------------------#'
-SET @@global.max_sort_length = TRUE;
-Warnings:
-Warning 1292 Truncated incorrect max_sort_length value: '1'
-SELECT @@global.max_sort_length;
-@@global.max_sort_length
-8
-SET @@global.max_sort_length = FALSE;
-Warnings:
-Warning 1292 Truncated incorrect max_sort_length value: '0'
-SELECT @@global.max_sort_length;
-@@global.max_sort_length
-8
-'#---------------------FN_DYNVARS_084_09----------------------#'
-SET @@global.max_sort_length = 2048;
-SELECT @@max_sort_length = @@global.max_sort_length;
-@@max_sort_length = @@global.max_sort_length
-0
-'#---------------------FN_DYNVARS_084_10----------------------#'
-SET @@max_sort_length = 100000;
-SELECT @@max_sort_length = @@local.max_sort_length;
-@@max_sort_length = @@local.max_sort_length
-1
-SELECT @@local.max_sort_length = @@session.max_sort_length;
-@@local.max_sort_length = @@session.max_sort_length
-1
-'#---------------------FN_DYNVARS_084_11----------------------#'
-SET max_sort_length = 1024;
-SELECT @@max_sort_length;
-@@max_sort_length
-1024
-SELECT local.max_sort_length;
-ERROR 42S02: Unknown table 'local' in field list
-SELECT session.max_sort_length;
-ERROR 42S02: Unknown table 'session' in field list
-SELECT max_sort_length = @@session.max_sort_length;
-ERROR 42S22: Unknown column 'max_sort_length' in 'field list'
-SET @@global.max_sort_length = @start_global_value;
-SELECT @@global.max_sort_length;
-@@global.max_sort_length
-1024
-SET @@session.max_sort_length = @start_session_value;
-SELECT @@session.max_sort_length;
-@@session.max_sort_length
-1024
diff --git a/mysql-test/suite/sys_vars/r/max_sort_length_func.result b/mysql-test/suite/sys_vars/r/max_sort_length_func.result
index 36f5518287a..3ec8faf1b7e 100644
--- a/mysql-test/suite/sys_vars/r/max_sort_length_func.result
+++ b/mysql-test/suite/sys_vars/r/max_sort_length_func.result
@@ -1,301 +1,279 @@
SET @start_value= @@global.max_sort_length;
-SET @session_max_sort_length = @@Session.max_sort_length;
-DROP TABLE IF EXISTS t;
** creating tables **
-CREATE TABLE t
-(
-id INT AUTO_INCREMENT PRIMARY KEY,
-c TEXT(30)
-);
-CREATE TABLE t1
-(
-id INT AUTO_INCREMENT PRIMARY KEY,
-c BLOB(30)
-);
-CREATE TABLE t2
-(
-id INT AUTO_INCREMENT PRIMARY KEY,
-c TEXT(30)
-);
+CREATE TABLE t (id INT AUTO_INCREMENT PRIMARY KEY, c TEXT);
+CREATE TABLE t1 (id INT AUTO_INCREMENT PRIMARY KEY, c BLOB);
+CREATE TABLE t2 (id INT AUTO_INCREMENT PRIMARY KEY, c TEXT);
'#--------------------FN_DYNVARS_098_01-------------------------#'
connect test_con1,localhost,root,,;
-connection test_con1;
-SELECT @@global.max_sort_length = 10;
-@@global.max_sort_length = 10
-0
-SELECT @@session.max_sort_length = 10;
-@@session.max_sort_length = 10
-0
-** Setting value to 30 and inserting data **
-SET @@global.max_sort_length = 30;
+** Setting value to 70 and inserting data **
+SET @@global.max_sort_length = 70;
SELECT @@global.max_sort_length;
@@global.max_sort_length
-30
-INSERT INTO t set c = repeat('x',29);
-INSERT INTO t set c = concat(repeat('x',28),'r','x');
-INSERT INTO t set c = concat(repeat('x',28),'s','y');
-INSERT INTO t set c = concat(repeat('x',28),'g','w');
+70
+INSERT INTO t set c = repeat('x',69);
+INSERT INTO t set c = concat(repeat('x',68),'r','x');
+INSERT INTO t set c = concat(repeat('x',68),'s','y');
+INSERT INTO t set c = concat(repeat('x',68),'g','w');
SELECT c from t ORDER BY c, id;
c
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxgw
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxrx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxsy
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxgw
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxrx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxsy
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
** Results should be sorted **
-SET @@session.max_sort_length = 29;
+SET @@session.max_sort_length = 69;
SELECT @@session.max_sort_length;
@@session.max_sort_length
-29
-INSERT INTO t set c = repeat('x',29);
-INSERT INTO t set c = concat(repeat('x',28),'r','x');
-INSERT INTO t set c = concat(repeat('x',28),'s','y');
-INSERT INTO t set c = concat(repeat('x',28),'g','w');
+69
+INSERT INTO t set c = repeat('x',69);
+INSERT INTO t set c = concat(repeat('x',68),'r','x');
+INSERT INTO t set c = concat(repeat('x',68),'s','y');
+INSERT INTO t set c = concat(repeat('x',68),'g','w');
SELECT c from t ORDER BY c, id;
c
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxgw
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxgw
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxrx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxrx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxsy
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxsy
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxgw
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxgw
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxrx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxrx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxsy
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxsy
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
** Results should be sorted **
'#--------------------FN_DYNVARS_098_02-------------------------#'
connect test_con2,localhost,root,,;
-connection test_con2;
-SET @@global.max_sort_length = 30;
+SET @@global.max_sort_length = 70;
SELECT @@global.max_sort_length;
@@global.max_sort_length
-30
-INSERT INTO t set c = repeat('x',29);
-INSERT INTO t set c = concat(repeat('x',28),'r','x');
-INSERT INTO t set c = concat(repeat('x',28),'s','y');
-INSERT INTO t set c = concat(repeat('x',28),'g','w');
+70
+INSERT INTO t set c = repeat('x',69);
+INSERT INTO t set c = concat(repeat('x',68),'r','x');
+INSERT INTO t set c = concat(repeat('x',68),'s','y');
+INSERT INTO t set c = concat(repeat('x',68),'g','w');
SELECT c from t ORDER BY c, id;
c
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxgw
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxgw
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxgw
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxrx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxrx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxrx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxsy
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxsy
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxsy
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxgw
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxgw
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxgw
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxrx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxrx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxrx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxsy
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxsy
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxsy
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
** Results should be sorted **
-SET @@session.max_sort_length = 20;
+SET @@session.max_sort_length = 64;
SELECT @@session.max_sort_length;
@@session.max_sort_length
-20
-INSERT INTO t set c = repeat('x',29);
-INSERT INTO t set c = concat(repeat('x',28),'r','x');
-INSERT INTO t set c = concat(repeat('x',28),'s','y');
-INSERT INTO t set c = concat(repeat('x',28),'g','w');
+64
+INSERT INTO t set c = repeat('x',69);
+INSERT INTO t set c = concat(repeat('x',68),'r','x');
+INSERT INTO t set c = concat(repeat('x',68),'s','y');
+INSERT INTO t set c = concat(repeat('x',68),'g','w');
SELECT c from t ORDER BY c, id;
c
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxrx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxsy
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxgw
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxrx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxsy
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxgw
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxrx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxsy
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxgw
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxrx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxsy
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxgw
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxrx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxsy
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxgw
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxrx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxsy
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxgw
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxrx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxsy
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxgw
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxrx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxsy
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxgw
** Results should not be sorted **
'#--------------------FN_DYNVARS_098_03-------------------------#'
-SET max_sort_length=20;
-INSERT INTO t set c = repeat('x',29);
-INSERT INTO t set c = concat(repeat('x',28),'r','x');
-INSERT INTO t set c = concat(repeat('x',28),'s','y');
-INSERT INTO t set c = concat(repeat('x',28),'g','w');
+SET max_sort_length=64;
+INSERT INTO t set c = repeat('x',69);
+INSERT INTO t set c = concat(repeat('x',68),'r','x');
+INSERT INTO t set c = concat(repeat('x',68),'s','y');
+INSERT INTO t set c = concat(repeat('x',68),'g','w');
SELECT c from t ORDER BY c, id;
c
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxrx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxsy
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxgw
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxrx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxsy
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxgw
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxrx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxsy
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxgw
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxrx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxsy
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxgw
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxrx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxsy
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxgw
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxrx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxsy
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxgw
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxrx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxsy
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxgw
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxrx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxsy
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxgw
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxrx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxsy
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxgw
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxrx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxsy
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxgw
** Results should not be sorted **
RESET QUERY CACHE;
'#--------------------FN_DYNVARS_098_04-------------------------#'
-SET max_sort_length=29;
+SET max_sort_length=69;
SELECT c from t ORDER BY c, id;
c
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxgw
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxgw
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxgw
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxgw
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxgw
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxrx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxrx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxrx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxrx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxrx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxsy
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxsy
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxsy
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxsy
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxsy
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxgw
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxgw
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxgw
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxgw
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxgw
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxrx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxrx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxrx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxrx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxrx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxsy
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxsy
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxsy
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxsy
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxsy
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
** Results should be sorted **
'#--------------------FN_DYNVARS_098_05-------------------------#'
-SET max_sort_length=30;
+SET max_sort_length=70;
SELECT c from t ORDER BY c, id;
c
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxgw
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxgw
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxgw
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxgw
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxgw
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxrx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxrx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxrx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxrx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxrx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxsy
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxsy
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxsy
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxsy
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxsy
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxgw
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxgw
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxgw
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxgw
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxgw
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxrx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxrx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxrx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxrx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxrx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxsy
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxsy
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxsy
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxsy
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxsy
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
** Results should be sorted **
'#--------------------FN_DYNVARS_098_06-------------------------#'
SET max_sort_length=default;
+SELECT @@max_sort_length;
+@@max_sort_length
+70
SELECT c from t ORDER BY c, id;
c
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxgw
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxgw
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxgw
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxgw
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxgw
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxrx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxrx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxrx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxrx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxrx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxsy
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxsy
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxsy
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxsy
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxsy
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxgw
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxgw
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxgw
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxgw
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxgw
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxrx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxrx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxrx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxrx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxrx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxsy
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxsy
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxsy
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxsy
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxsy
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
** Results should be sorted **
'#--------------------FN_DYNVARS_098_07-------------------------#'
Testing type BLOB
-SET @@global.max_sort_length = 30;
-SELECT @@global.max_sort_length;
-@@global.max_sort_length
-30
-INSERT INTO t1 set c = repeat('x',29);
-INSERT INTO t1 set c = concat(repeat('x',28),'r','x');
-INSERT INTO t1 set c = concat(repeat('x',28),'s','y');
-INSERT INTO t1 set c = concat(repeat('x',28),'g','w');
-SELECT c from t1 ORDER BY c, id;
+SET @@max_sort_length = 70;
+SELECT @@max_sort_length;
+@@max_sort_length
+70
+INSERT INTO t1 set c = repeat('x',69);
+INSERT INTO t1 set c = concat(repeat('x',68),'r','x');
+INSERT INTO t1 set c = concat(repeat('x',68),'s','y');
+INSERT INTO t1 set c = concat(repeat('x',68),'g','w');
+SELECT c from t1 ORDER BY c, id DESC;
c
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxgw
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxrx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxsy
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxgw
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxsy
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxrx
** Results should be sorted **
-SET @@session.max_sort_length = 20;
+SET @@session.max_sort_length = 64;
SELECT @@session.max_sort_length;
@@session.max_sort_length
-20
-INSERT INTO t1 set c = repeat('x',29);
-INSERT INTO t1 set c = concat(repeat('x',28),'r','x');
-INSERT INTO t1 set c = concat(repeat('x',28),'s','y');
-INSERT INTO t1 set c = concat(repeat('x',28),'g','w');
+64
+INSERT INTO t1 set c = repeat('x',69);
+INSERT INTO t1 set c = concat(repeat('x',68),'r','x');
+INSERT INTO t1 set c = concat(repeat('x',68),'s','y');
+INSERT INTO t1 set c = concat(repeat('x',68),'g','w');
SELECT c from t1 ORDER BY c, id;
c
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxrx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxsy
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxgw
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxrx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxsy
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxgw
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxrx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxsy
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxgw
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxrx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxsy
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxgw
** Results should not be sorted **
'#--------------------FN_DYNVARS_098_08-------------------------#'
Testing type CHAR
-SET @@global.max_sort_length = 30;
+SET @@global.max_sort_length = 70;
SELECT @@global.max_sort_length;
@@global.max_sort_length
-30
-INSERT INTO t2 set c = repeat('x',29);
-INSERT INTO t2 set c = concat(repeat('x',28),'r','x');
-INSERT INTO t2 set c = concat(repeat('x',28),'s','y');
-INSERT INTO t2 set c = concat(repeat('x',28),'g','w');
+70
+INSERT INTO t2 set c = repeat('x',69);
+INSERT INTO t2 set c = concat(repeat('x',68),'r','x');
+INSERT INTO t2 set c = concat(repeat('x',68),'s','y');
+INSERT INTO t2 set c = concat(repeat('x',68),'g','w');
SELECT c from t2 ORDER BY c, id;
c
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxrx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxsy
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxgw
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxrx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxsy
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxgw
** Results should not be sorted **
-SET @@session.max_sort_length = 20;
+SET @@session.max_sort_length = 64;
SELECT @@session.max_sort_length;
@@session.max_sort_length
-20
-INSERT INTO t2 set c = repeat('x',29);
-INSERT INTO t2 set c = concat(repeat('x',28),'r','x');
-INSERT INTO t2 set c = concat(repeat('x',28),'s','y');
-INSERT INTO t2 set c = concat(repeat('x',28),'g','w');
+64
+INSERT INTO t2 set c = repeat('x',69);
+INSERT INTO t2 set c = concat(repeat('x',68),'r','x');
+INSERT INTO t2 set c = concat(repeat('x',68),'s','y');
+INSERT INTO t2 set c = concat(repeat('x',68),'g','w');
SELECT c from t2 ORDER BY c, id;
c
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxrx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxsy
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxgw
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxrx
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxsy
-xxxxxxxxxxxxxxxxxxxxxxxxxxxxgw
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxrx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxsy
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxgw
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxrx
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxsy
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxgw
** Results should not be sorted **
connection default;
disconnect test_con1;
disconnect test_con2;
-SET @@SESSION.max_sort_length = @session_max_sort_length;
-DROP TABLE IF EXISTS t;
-DROP TABLE IF EXISTS t1;
-DROP TABLE IF EXISTS t2;
+DROP TABLE t, t1, t2;
SET @@global.max_sort_length= @start_value;
diff --git a/mysql-test/suite/sys_vars/r/sysvars_innodb,32bit.rdiff b/mysql-test/suite/sys_vars/r/sysvars_innodb,32bit.rdiff
index 50a1d1f197a..bfbbfb43c74 100644
--- a/mysql-test/suite/sys_vars/r/sysvars_innodb,32bit.rdiff
+++ b/mysql-test/suite/sys_vars/r/sysvars_innodb,32bit.rdiff
@@ -250,7 +250,7 @@
VARIABLE_SCOPE GLOBAL
-VARIABLE_TYPE BIGINT UNSIGNED
+VARIABLE_TYPE INT UNSIGNED
- VARIABLE_COMMENT Up to what percentage of dirty pages should be flushed when innodb finds it has spare resources to do so.
+ VARIABLE_COMMENT DEPRECATED. This setting has no effect.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 100
@@ -1141,22 +1141,22 @@
diff --git a/mysql-test/suite/sys_vars/r/sysvars_innodb.result b/mysql-test/suite/sys_vars/r/sysvars_innodb.result
index 849937ce3a4..8c4eb4a28f8 100644
--- a/mysql-test/suite/sys_vars/r/sysvars_innodb.result
+++ b/mysql-test/suite/sys_vars/r/sysvars_innodb.result
@@ -1106,7 +1106,7 @@ SESSION_VALUE NULL
DEFAULT_VALUE 100
VARIABLE_SCOPE GLOBAL
VARIABLE_TYPE BIGINT UNSIGNED
-VARIABLE_COMMENT Up to what percentage of dirty pages should be flushed when innodb finds it has spare resources to do so.
+VARIABLE_COMMENT DEPRECATED. This setting has no effect.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 100
NUMERIC_BLOCK_SIZE 0
@@ -1773,18 +1773,6 @@ NUMERIC_BLOCK_SIZE 0
ENUM_VALUE_LIST NULL
READ_ONLY NO
COMMAND_LINE_ARGUMENT OPTIONAL
-VARIABLE_NAME INNODB_SIMULATE_COMP_FAILURES
-SESSION_VALUE NULL
-DEFAULT_VALUE 0
-VARIABLE_SCOPE GLOBAL
-VARIABLE_TYPE INT UNSIGNED
-VARIABLE_COMMENT Simulate compression failures.
-NUMERIC_MIN_VALUE 0
-NUMERIC_MAX_VALUE 99
-NUMERIC_BLOCK_SIZE 0
-ENUM_VALUE_LIST NULL
-READ_ONLY NO
-COMMAND_LINE_ARGUMENT NONE
VARIABLE_NAME INNODB_SORT_BUFFER_SIZE
SESSION_VALUE NULL
DEFAULT_VALUE 1048576
@@ -2138,7 +2126,7 @@ SESSION_VALUE NULL
DEFAULT_VALUE ON
VARIABLE_SCOPE GLOBAL
VARIABLE_TYPE BOOLEAN
-VARIABLE_COMMENT Enable atomic writes, instead of using the doublewrite buffer, for files on devices that supports atomic writes. To use this option one must use innodb_file_per_table=1, innodb_flush_method=O_DIRECT. This option only works on Linux with either FusionIO cards using the directFS filesystem or with Shannon cards using any file system.
+VARIABLE_COMMENT Enable atomic writes, instead of using the doublewrite buffer, for files on devices that supports atomic writes. This option only works on Linux with either FusionIO cards using the directFS filesystem or with Shannon cards using any file system.
NUMERIC_MIN_VALUE NULL
NUMERIC_MAX_VALUE NULL
NUMERIC_BLOCK_SIZE NULL
diff --git a/mysql-test/suite/sys_vars/r/sysvars_server_embedded.result b/mysql-test/suite/sys_vars/r/sysvars_server_embedded.result
index 37cb483e50d..7a8984a3736 100644
--- a/mysql-test/suite/sys_vars/r/sysvars_server_embedded.result
+++ b/mysql-test/suite/sys_vars/r/sysvars_server_embedded.result
@@ -9,7 +9,7 @@ where variable_name not like 'debug%' and
variable_name not like 'wsrep%' and
variable_name not like 's3%' and
variable_name not in (
-'log_tc_size'
+'log_tc_size','have_sanitizer'
)
order by variable_name;
VARIABLE_NAME ALTER_ALGORITHM
@@ -1044,10 +1044,10 @@ READ_ONLY NO
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME GROUP_CONCAT_MAX_LEN
VARIABLE_SCOPE SESSION
-VARIABLE_TYPE BIGINT UNSIGNED
+VARIABLE_TYPE INT UNSIGNED
VARIABLE_COMMENT The maximum length of the result of function GROUP_CONCAT()
NUMERIC_MIN_VALUE 4
-NUMERIC_MAX_VALUE 18446744073709551615
+NUMERIC_MAX_VALUE 4294967295
NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY NO
@@ -1946,7 +1946,7 @@ VARIABLE_NAME MAX_SORT_LENGTH
VARIABLE_SCOPE SESSION
VARIABLE_TYPE BIGINT UNSIGNED
VARIABLE_COMMENT The number of bytes to use when sorting BLOB or TEXT values (only the first max_sort_length bytes of each value are used; the rest are ignored)
-NUMERIC_MIN_VALUE 8
+NUMERIC_MIN_VALUE 64
NUMERIC_MAX_VALUE 8388608
NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
diff --git a/mysql-test/suite/sys_vars/r/sysvars_server_notembedded.result b/mysql-test/suite/sys_vars/r/sysvars_server_notembedded.result
index 083469406eb..79512aa9032 100644
--- a/mysql-test/suite/sys_vars/r/sysvars_server_notembedded.result
+++ b/mysql-test/suite/sys_vars/r/sysvars_server_notembedded.result
@@ -9,7 +9,7 @@ where variable_name not like 'debug%' and
variable_name not like 'wsrep%' and
variable_name not like 's3%' and
variable_name not in (
-'log_tc_size'
+'log_tc_size','have_sanitizer'
)
order by variable_name;
VARIABLE_NAME ALTER_ALGORITHM
@@ -1064,10 +1064,10 @@ READ_ONLY NO
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME GROUP_CONCAT_MAX_LEN
VARIABLE_SCOPE SESSION
-VARIABLE_TYPE BIGINT UNSIGNED
+VARIABLE_TYPE INT UNSIGNED
VARIABLE_COMMENT The maximum length of the result of function GROUP_CONCAT()
NUMERIC_MIN_VALUE 4
-NUMERIC_MAX_VALUE 18446744073709551615
+NUMERIC_MAX_VALUE 4294967295
NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY NO
@@ -2106,7 +2106,7 @@ VARIABLE_NAME MAX_SORT_LENGTH
VARIABLE_SCOPE SESSION
VARIABLE_TYPE BIGINT UNSIGNED
VARIABLE_COMMENT The number of bytes to use when sorting BLOB or TEXT values (only the first max_sort_length bytes of each value are used; the rest are ignored)
-NUMERIC_MIN_VALUE 8
+NUMERIC_MIN_VALUE 64
NUMERIC_MAX_VALUE 8388608
NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
diff --git a/mysql-test/suite/sys_vars/r/sysvars_wsrep.result b/mysql-test/suite/sys_vars/r/sysvars_wsrep.result
index 5ec5b9ccf30..4bff3d4441e 100644
--- a/mysql-test/suite/sys_vars/r/sysvars_wsrep.result
+++ b/mysql-test/suite/sys_vars/r/sysvars_wsrep.result
@@ -363,7 +363,7 @@ NUMERIC_MIN_VALUE NULL
NUMERIC_MAX_VALUE NULL
NUMERIC_BLOCK_SIZE NULL
ENUM_VALUE_LIST NULL
-READ_ONLY NO
+READ_ONLY YES
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME WSREP_ON
SESSION_VALUE OFF
@@ -419,7 +419,7 @@ NUMERIC_MIN_VALUE NULL
NUMERIC_MAX_VALUE NULL
NUMERIC_BLOCK_SIZE NULL
ENUM_VALUE_LIST NULL
-READ_ONLY NO
+READ_ONLY YES
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME WSREP_PROVIDER_OPTIONS
SESSION_VALUE NULL
diff --git a/mysql-test/suite/sys_vars/r/wsrep_debug_basic.result b/mysql-test/suite/sys_vars/r/wsrep_debug_basic.result
index 47d00f5dede..1c9c2ddf3a3 100644
--- a/mysql-test/suite/sys_vars/r/wsrep_debug_basic.result
+++ b/mysql-test/suite/sys_vars/r/wsrep_debug_basic.result
@@ -16,9 +16,11 @@ SELECT @@global.wsrep_debug;
@@global.wsrep_debug
NONE
SET @@global.wsrep_debug=1;
+Warnings:
+Warning 1231 Setting 'wsrep_debug' has no effect because wsrep is switched off
SELECT @@global.wsrep_debug;
@@global.wsrep_debug
-SERVER
+NONE
# valid values
SET @@global.wsrep_debug=NONE;
@@ -26,9 +28,11 @@ SELECT @@global.wsrep_debug;
@@global.wsrep_debug
NONE
SET @@global.wsrep_debug=SERVER;
+Warnings:
+Warning 1231 Setting 'wsrep_debug' has no effect because wsrep is switched off
SELECT @@global.wsrep_debug;
@@global.wsrep_debug
-SERVER
+NONE
SET @@global.wsrep_debug=default;
SELECT @@global.wsrep_debug;
@@global.wsrep_debug
diff --git a/mysql-test/suite/sys_vars/r/wsrep_notify_cmd_basic.result b/mysql-test/suite/sys_vars/r/wsrep_notify_cmd_basic.result
deleted file mode 100644
index 056ff8c817b..00000000000
--- a/mysql-test/suite/sys_vars/r/wsrep_notify_cmd_basic.result
+++ /dev/null
@@ -1,47 +0,0 @@
-#
-# wsrep_notify_cmd
-#
-call mtr.add_suppression("WSREP: Failed to get provider options");
-# save the initial value
-SET @wsrep_notify_cmd_global_saved = @@global.wsrep_notify_cmd;
-# default
-SELECT @@global.wsrep_notify_cmd;
-@@global.wsrep_notify_cmd
-
-
-# scope
-SELECT @@session.wsrep_notify_cmd;
-ERROR HY000: Variable 'wsrep_notify_cmd' is a GLOBAL variable
-SET @@global.wsrep_notify_cmd='notify_cmd';
-SELECT @@global.wsrep_notify_cmd;
-@@global.wsrep_notify_cmd
-notify_cmd
-
-# valid values
-SET @@global.wsrep_notify_cmd='command';
-SELECT @@global.wsrep_notify_cmd;
-@@global.wsrep_notify_cmd
-command
-SET @@global.wsrep_notify_cmd='hyphenated-command';
-SELECT @@global.wsrep_notify_cmd;
-@@global.wsrep_notify_cmd
-hyphenated-command
-SET @@global.wsrep_notify_cmd=default;
-SELECT @@global.wsrep_notify_cmd;
-@@global.wsrep_notify_cmd
-
-SET @@global.wsrep_notify_cmd=NULL;
-SELECT @@global.wsrep_notify_cmd;
-@@global.wsrep_notify_cmd
-NULL
-
-# invalid values
-SET @@global.wsrep_notify_cmd=1;
-ERROR 42000: Incorrect argument type to variable 'wsrep_notify_cmd'
-SELECT @@global.wsrep_notify_cmd;
-@@global.wsrep_notify_cmd
-NULL
-
-# restore the initial value
-SET @@global.wsrep_notify_cmd = @wsrep_notify_cmd_global_saved;
-# End of test
diff --git a/mysql-test/suite/sys_vars/r/wsrep_on_without_provider.result b/mysql-test/suite/sys_vars/r/wsrep_on_without_provider.result
new file mode 100644
index 00000000000..525619dba29
--- /dev/null
+++ b/mysql-test/suite/sys_vars/r/wsrep_on_without_provider.result
@@ -0,0 +1,5 @@
+SET GLOBAL wsrep_on=ON;
+ERROR HY000: WSREP (galera) can't be enabled if the wsrep_provider is unset or set to 'none'
+SELECT @@global.wsrep_on;
+@@global.wsrep_on
+0
diff --git a/mysql-test/suite/sys_vars/r/wsrep_provider_basic.result b/mysql-test/suite/sys_vars/r/wsrep_provider_basic.result
deleted file mode 100644
index 3e4ac8ca883..00000000000
--- a/mysql-test/suite/sys_vars/r/wsrep_provider_basic.result
+++ /dev/null
@@ -1,40 +0,0 @@
-#
-# wsrep_provider
-#
-# save the initial value
-SET @wsrep_provider_global_saved = @@global.wsrep_provider;
-# default
-SELECT @@global.wsrep_provider;
-@@global.wsrep_provider
-none
-
-# scope
-SELECT @@session.wsrep_provider;
-ERROR HY000: Variable 'wsrep_provider' is a GLOBAL variable
-SELECT @@global.wsrep_provider;
-@@global.wsrep_provider
-none
-
-# valid values
-SET @@global.wsrep_provider=default;
-SELECT @@global.wsrep_provider;
-@@global.wsrep_provider
-none
-
-# invalid values
-SET @@global.wsrep_provider='/invalid/libgalera_smm.so';
-ERROR 42000: Variable 'wsrep_provider' can't be set to the value of '/invalid/libgalera_smm.so'
-SET @@global.wsrep_provider=NULL;
-ERROR 42000: Variable 'wsrep_provider' can't be set to the value of 'NULL'
-SELECT @@global.wsrep_provider;
-@@global.wsrep_provider
-none
-SET @@global.wsrep_provider=1;
-ERROR 42000: Incorrect argument type to variable 'wsrep_provider'
-SELECT @@global.wsrep_provider;
-@@global.wsrep_provider
-none
-
-# restore the initial value
-SET @@global.wsrep_provider = @wsrep_provider_global_saved;
-# End of test
diff --git a/mysql-test/suite/sys_vars/r/wsrep_provider_options_basic.result b/mysql-test/suite/sys_vars/r/wsrep_provider_options_basic.result
deleted file mode 100644
index 15949a14e39..00000000000
--- a/mysql-test/suite/sys_vars/r/wsrep_provider_options_basic.result
+++ /dev/null
@@ -1,46 +0,0 @@
-#
-# wsrep_provider_options
-#
-call mtr.add_suppression("WSREP: Failed to get provider options");
-# default
-SELECT @@global.wsrep_provider_options;
-@@global.wsrep_provider_options
-
-
-# scope
-SELECT @@session.wsrep_provider_options;
-ERROR HY000: Variable 'wsrep_provider_options' is a GLOBAL variable
-SET @@global.wsrep_provider_options='option1';
-SELECT @@global.wsrep_provider_options;
-@@global.wsrep_provider_options
-
-
-# valid values
-SET @@global.wsrep_provider_options='name1=value1;name2=value2';
-ERROR HY000: WSREP (galera) not started
-SELECT @@global.wsrep_provider_options;
-@@global.wsrep_provider_options
-
-SET @@global.wsrep_provider_options='hyphenated-name:value';
-ERROR HY000: WSREP (galera) not started
-SELECT @@global.wsrep_provider_options;
-@@global.wsrep_provider_options
-
-SET @@global.wsrep_provider_options=default;
-ERROR HY000: WSREP (galera) not started
-SELECT @@global.wsrep_provider_options;
-@@global.wsrep_provider_options
-
-
-# invalid values
-SET @@global.wsrep_provider_options=1;
-ERROR 42000: Incorrect argument type to variable 'wsrep_provider_options'
-SELECT @@global.wsrep_provider_options;
-@@global.wsrep_provider_options
-
-SET @@global.wsrep_provider_options=NULL;
-Got one of the listed errors
-SELECT @@global.wsrep_provider_options;
-@@global.wsrep_provider_options
-
-# End of test
diff --git a/mysql-test/suite/sys_vars/t/innodb_simulate_comp_failures_basic.test b/mysql-test/suite/sys_vars/t/innodb_simulate_comp_failures_basic.test
deleted file mode 100644
index 07e70bf7343..00000000000
--- a/mysql-test/suite/sys_vars/t/innodb_simulate_comp_failures_basic.test
+++ /dev/null
@@ -1,65 +0,0 @@
---source include/have_innodb.inc
---source include/have_debug.inc
-
-SET @start_global_value = @@global.innodb_simulate_comp_failures;
-SELECT @start_global_value;
-
-#
-# exists as global only
-#
-
---echo Valid values are between 0 and 99
-select @@global.innodb_simulate_comp_failures between 0 and 99;
-select @@global.innodb_simulate_comp_failures;
-
---error ER_INCORRECT_GLOBAL_LOCAL_VAR
-select @@session.innodb_simulate_comp_failures;
-
-show global variables like 'innodb_simulate_comp_failures';
-show session variables like 'innodb_simulate_comp_failures';
-select * from information_schema.global_variables where variable_name='innodb_simulate_comp_failures';
-select * from information_schema.session_variables where variable_name='innodb_simulate_comp_failures';
-
-#
-# show that it's writable
-#
-
-set global innodb_simulate_comp_failures=10;
-select @@global.innodb_simulate_comp_failures;
-select * from information_schema.global_variables where variable_name='innodb_simulate_comp_failures';
-select * from information_schema.session_variables where variable_name='innodb_simulate_comp_failures';
-
---error ER_GLOBAL_VARIABLE
-set session innodb_simulate_comp_failures=1;
-
-#
-# incorrect types
-#
-
---error ER_WRONG_TYPE_FOR_VAR
-set global innodb_simulate_comp_failures=1.1;
---error ER_WRONG_TYPE_FOR_VAR
-set global innodb_simulate_comp_failures=1e1;
---error ER_WRONG_TYPE_FOR_VAR
-set global innodb_simulate_comp_failures="foo";
-
-set global innodb_simulate_comp_failures=-7;
-select @@global.innodb_simulate_comp_failures;
-select * from information_schema.global_variables where variable_name='innodb_simulate_comp_failures';
-set global innodb_simulate_comp_failures=106;
-select @@global.innodb_simulate_comp_failures;
-select * from information_schema.global_variables where variable_name='innodb_simulate_comp_failures';
-
-#
-# min/max/DEFAULT values
-#
-
-set global innodb_simulate_comp_failures=0;
-select @@global.innodb_simulate_comp_failures;
-set global innodb_simulate_comp_failures=99;
-select @@global.innodb_simulate_comp_failures;
-set global innodb_simulate_comp_failures=DEFAULT;
-select @@global.innodb_simulate_comp_failures;
-
-SET @@global.innodb_simulate_comp_failures = @start_global_value;
-SELECT @@global.innodb_simulate_comp_failures;
diff --git a/mysql-test/suite/sys_vars/t/max_sort_length_basic.test b/mysql-test/suite/sys_vars/t/max_sort_length_basic.test
deleted file mode 100644
index fcd6db017f1..00000000000
--- a/mysql-test/suite/sys_vars/t/max_sort_length_basic.test
+++ /dev/null
@@ -1,225 +0,0 @@
-############## mysql-test\t\max_sort_length_basic.test ###############
-# #
-# Variable Name: max_sort_length #
-# Scope: GLOBAL | SESSION #
-# Access Type: Dynamic #
-# Data Type: numeric #
-# Default Value: 1024 #
-# Range: 4-8388608 #
-# #
-# #
-# Creation Date: 2008-02-07 #
-# Author: Salman #
-# #
-# Description: Test Cases of Dynamic System Variable max_sort_length #
-# that checks the behavior of this variable in the following ways#
-# * Default Value #
-# * Valid & Invalid values #
-# * Scope & Access method #
-# * Data Integrity #
-# #
-# Reference: http://dev.mysql.com/doc/refman/5.1/en/ #
-# server-system-variables.html #
-# #
-###############################################################################
-
---source include/load_sysvars.inc
-
-
-############################################
-# START OF max_sort_length TESTS #
-############################################
-
-
-#############################################################
-# Save initial value #
-#############################################################
-
-SET @start_global_value = @@global.max_sort_length;
-SELECT @start_global_value;
-SET @start_session_value = @@session.max_sort_length;
-SELECT @start_session_value;
-
-
---echo '#--------------------FN_DYNVARS_084_01-------------------------#'
-#######################################################
-# Display the DEFAULT value of max_sort_length #
-#######################################################
-
-SET @@global.max_sort_length = 1000;
-SET @@global.max_sort_length = DEFAULT;
-SELECT @@global.max_sort_length;
-
-
-SET @@session.max_sort_length = 1000;
-SET @@session.max_sort_length = DEFAULT;
-SELECT @@session.max_sort_length;
-
-
---echo '#--------------------FN_DYNVARS_084_02-------------------------#'
-#######################################################
-# Check the DEFAULT value of max_sort_length #
-#######################################################
-
-SET @@global.max_sort_length = DEFAULT;
-SELECT @@global.max_sort_length = 1024;
-
-SET @@session.max_sort_length = DEFAULT;
-SELECT @@session.max_sort_length = 1024;
-
-
-
---echo '#--------------------FN_DYNVARS_084_03-------------------------#'
-#########################################################################
-# Change the value of max_sort_length to a valid value for GLOBAL Scope #
-#########################################################################
-
-SET @@global.max_sort_length = 8;
-SELECT @@global.max_sort_length;
-SET @@global.max_sort_length = 9;
-SELECT @@global.max_sort_length;
-SET @@global.max_sort_length = 8388608;
-SELECT @@global.max_sort_length;
-SET @@global.max_sort_length = 8388607;
-SELECT @@global.max_sort_length;
-SET @@global.max_sort_length = 65536;
-SELECT @@global.max_sort_length;
-
---echo '#--------------------FN_DYNVARS_084_04-------------------------#'
-##########################################################################
-# Change the value of max_sort_length to a valid value for SESSION Scope #
-##########################################################################
-
-SET @@session.max_sort_length = 8;
-SELECT @@session.max_sort_length;
-
-SET @@session.max_sort_length = 9;
-SELECT @@session.max_sort_length;
-
-SET @@session.max_sort_length = 8388608;
-SELECT @@session.max_sort_length;
-
-SET @@session.max_sort_length = 8388607;
-SELECT @@session.max_sort_length;
-
-SET @@session.max_sort_length = 65536;
-SELECT @@session.max_sort_length;
-
-
---echo '#------------------FN_DYNVARS_084_05-----------------------#'
-###########################################################
-# Change the value of max_sort_length to an invalid value #
-###########################################################
-
-SET @@global.max_sort_length = -1024;
-SELECT @@global.max_sort_length;
-SET @@global.max_sort_length = 3;
-SELECT @@global.max_sort_length;
-SET @@global.max_sort_length = 8388609;
-SELECT @@global.max_sort_length;
-SET @@global.max_sort_length = 0;
-SELECT @@global.max_sort_length;
---Error ER_WRONG_TYPE_FOR_VAR
-SET @@global.max_sort_length = 65530.34;
-SELECT @@global.max_sort_length;
---Error ER_WRONG_TYPE_FOR_VAR
-SET @@global.max_sort_length = test;
-SELECT @@global.max_sort_length;
-
-SET @@session.max_sort_length = 8388610;
-SELECT @@session.max_sort_length;
-SET @@session.max_sort_length = -1;
-SELECT @@session.max_sort_length;
-SET @@session.max_sort_length = 3;
-SELECT @@session.max_sort_length;
-SET @@session.max_sort_length = 0;
-SELECT @@session.max_sort_length;
---Error ER_WRONG_TYPE_FOR_VAR
-SET @@session.max_sort_length = 65530.34;
-SET @@session.max_sort_length = 10737418241;
-SELECT @@session.max_sort_length;
---Error ER_WRONG_TYPE_FOR_VAR
-SET @@session.max_sort_length = test;
-SELECT @@session.max_sort_length;
-
-
---echo '#------------------FN_DYNVARS_084_06-----------------------#'
-####################################################################
-# Check if the value in GLOBAL Table matches value in variable #
-####################################################################
-
-
-SELECT @@global.max_sort_length = VARIABLE_VALUE
-FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES
-WHERE VARIABLE_NAME='max_sort_length';
-
---echo '#------------------FN_DYNVARS_084_07-----------------------#'
-####################################################################
-# Check if the value in SESSION Table matches value in variable #
-####################################################################
-
-SELECT @@session.max_sort_length = VARIABLE_VALUE
-FROM INFORMATION_SCHEMA.SESSION_VARIABLES
-WHERE VARIABLE_NAME='max_sort_length';
-
-
---echo '#------------------FN_DYNVARS_084_08-----------------------#'
-####################################################################
-# Check if TRUE and FALSE values can be used on variable #
-####################################################################
-
-SET @@global.max_sort_length = TRUE;
-SELECT @@global.max_sort_length;
-SET @@global.max_sort_length = FALSE;
-SELECT @@global.max_sort_length;
-
-
---echo '#---------------------FN_DYNVARS_084_09----------------------#'
-#################################################################################
-# Check if accessing variable with and without GLOBAL point to same variable #
-#################################################################################
-
-SET @@global.max_sort_length = 2048;
-SELECT @@max_sort_length = @@global.max_sort_length;
-
-
---echo '#---------------------FN_DYNVARS_084_10----------------------#'
-########################################################################################################
-# Check if accessing variable with SESSION,LOCAL and without SCOPE points to same session variable #
-########################################################################################################
-
-SET @@max_sort_length = 100000;
-SELECT @@max_sort_length = @@local.max_sort_length;
-SELECT @@local.max_sort_length = @@session.max_sort_length;
-
-
---echo '#---------------------FN_DYNVARS_084_11----------------------#'
-##########################################################################
-# Check if max_sort_length can be accessed with and without @@ sign #
-##########################################################################
-
-
-SET max_sort_length = 1024;
-SELECT @@max_sort_length;
---Error ER_UNKNOWN_TABLE
-SELECT local.max_sort_length;
---Error ER_UNKNOWN_TABLE
-SELECT session.max_sort_length;
---Error ER_BAD_FIELD_ERROR
-SELECT max_sort_length = @@session.max_sort_length;
-
-
-####################################
-# Restore initial value #
-####################################
-
-SET @@global.max_sort_length = @start_global_value;
-SELECT @@global.max_sort_length;
-SET @@session.max_sort_length = @start_session_value;
-SELECT @@session.max_sort_length;
-
-
-####################################################
-# END OF max_sort_length TESTS #
-####################################################
-
diff --git a/mysql-test/suite/sys_vars/t/max_sort_length_func.test b/mysql-test/suite/sys_vars/t/max_sort_length_func.test
index fd0b87750a1..d54453a57b6 100644
--- a/mysql-test/suite/sys_vars/t/max_sort_length_func.test
+++ b/mysql-test/suite/sys_vars/t/max_sort_length_func.test
@@ -26,70 +26,40 @@
SET @start_value= @@global.max_sort_length;
-SET @session_max_sort_length = @@Session.max_sort_length;
-
-
---disable_warnings
-DROP TABLE IF EXISTS t;
---enable_warnings
-
#########################
# Creating new table #
#########################
-
--echo ** creating tables **
-CREATE TABLE t
-(
-id INT AUTO_INCREMENT PRIMARY KEY,
-c TEXT(30)
-);
-
-CREATE TABLE t1
-(
-id INT AUTO_INCREMENT PRIMARY KEY,
-c BLOB(30)
-);
-
-CREATE TABLE t2
-(
-id INT AUTO_INCREMENT PRIMARY KEY,
-c TEXT(30)
-);
-
-
+CREATE TABLE t (id INT AUTO_INCREMENT PRIMARY KEY, c TEXT);
+CREATE TABLE t1 (id INT AUTO_INCREMENT PRIMARY KEY, c BLOB);
+CREATE TABLE t2 (id INT AUTO_INCREMENT PRIMARY KEY, c TEXT);
--echo '#--------------------FN_DYNVARS_098_01-------------------------#'
##########################################################
# Test behavior of variable on new connection # 01 #
##########################################################
-
connect (test_con1,localhost,root,,);
-connection test_con1;
-
-# Value of session & global variable here should be 10
-SELECT @@global.max_sort_length = 10;
-SELECT @@session.max_sort_length = 10;
# Setting global value of variable and inserting data in table
---echo ** Setting value to 30 and inserting data **
-SET @@global.max_sort_length = 30;
+--echo ** Setting value to 70 and inserting data **
+SET @@global.max_sort_length = 70;
SELECT @@global.max_sort_length;
-INSERT INTO t set c = repeat('x',29);
-INSERT INTO t set c = concat(repeat('x',28),'r','x');
-INSERT INTO t set c = concat(repeat('x',28),'s','y');
-INSERT INTO t set c = concat(repeat('x',28),'g','w');
+INSERT INTO t set c = repeat('x',69);
+INSERT INTO t set c = concat(repeat('x',68),'r','x');
+INSERT INTO t set c = concat(repeat('x',68),'s','y');
+INSERT INTO t set c = concat(repeat('x',68),'g','w');
SELECT c from t ORDER BY c, id;
--echo ** Results should be sorted **
# Setting session value of variable and inserting data in table
-SET @@session.max_sort_length = 29;
+SET @@session.max_sort_length = 69;
SELECT @@session.max_sort_length;
-INSERT INTO t set c = repeat('x',29);
-INSERT INTO t set c = concat(repeat('x',28),'r','x');
-INSERT INTO t set c = concat(repeat('x',28),'s','y');
-INSERT INTO t set c = concat(repeat('x',28),'g','w');
+INSERT INTO t set c = repeat('x',69);
+INSERT INTO t set c = concat(repeat('x',68),'r','x');
+INSERT INTO t set c = concat(repeat('x',68),'s','y');
+INSERT INTO t set c = concat(repeat('x',68),'g','w');
SELECT c from t ORDER BY c, id;
--echo ** Results should be sorted **
@@ -99,72 +69,69 @@ SELECT c from t ORDER BY c, id;
##########################################################
connect (test_con2,localhost,root,,);
-connection test_con2;
-
## Setting global value of variable and inserting data in table
-SET @@global.max_sort_length = 30;
+SET @@global.max_sort_length = 70;
SELECT @@global.max_sort_length;
-INSERT INTO t set c = repeat('x',29);
-INSERT INTO t set c = concat(repeat('x',28),'r','x');
-INSERT INTO t set c = concat(repeat('x',28),'s','y');
-INSERT INTO t set c = concat(repeat('x',28),'g','w');
+INSERT INTO t set c = repeat('x',69);
+INSERT INTO t set c = concat(repeat('x',68),'r','x');
+INSERT INTO t set c = concat(repeat('x',68),'s','y');
+INSERT INTO t set c = concat(repeat('x',68),'g','w');
SELECT c from t ORDER BY c, id;
--echo ** Results should be sorted **
# Setting session value of variable and inserting data in table
-SET @@session.max_sort_length = 20;
+SET @@session.max_sort_length = 64;
SELECT @@session.max_sort_length;
-INSERT INTO t set c = repeat('x',29);
-INSERT INTO t set c = concat(repeat('x',28),'r','x');
-INSERT INTO t set c = concat(repeat('x',28),'s','y');
-INSERT INTO t set c = concat(repeat('x',28),'g','w');
+INSERT INTO t set c = repeat('x',69);
+INSERT INTO t set c = concat(repeat('x',68),'r','x');
+INSERT INTO t set c = concat(repeat('x',68),'s','y');
+INSERT INTO t set c = concat(repeat('x',68),'g','w');
SELECT c from t ORDER BY c, id;
--echo ** Results should not be sorted **
--echo '#--------------------FN_DYNVARS_098_03-------------------------#'
#########################################################
-#Check if sorting is applied with the max_sort_length=20#
+#Check if sorting is applied with the max_sort_length=64#
#########################################################
###########################################
# Setting new value for max_sort_length #
###########################################
-SET max_sort_length=20;
+SET max_sort_length=64;
###################################
# Inserting values in table t #
###################################
-INSERT INTO t set c = repeat('x',29);
-INSERT INTO t set c = concat(repeat('x',28),'r','x');
-INSERT INTO t set c = concat(repeat('x',28),'s','y');
-INSERT INTO t set c = concat(repeat('x',28),'g','w');
+INSERT INTO t set c = repeat('x',69);
+INSERT INTO t set c = concat(repeat('x',68),'r','x');
+INSERT INTO t set c = concat(repeat('x',68),'s','y');
+INSERT INTO t set c = concat(repeat('x',68),'g','w');
SELECT c from t ORDER BY c, id;
--echo ** Results should not be sorted **
RESET QUERY CACHE;
-
--echo '#--------------------FN_DYNVARS_098_04-------------------------#'
#########################################################
-#Check if sorting is applied with the max_sort_length=29#
+#Check if sorting is applied with the max_sort_length=69#
#########################################################
-SET max_sort_length=29;
+SET max_sort_length=69;
SELECT c from t ORDER BY c, id;
--echo ** Results should be sorted **
--echo '#--------------------FN_DYNVARS_098_05-------------------------#'
#########################################################
-#Check if sorting is applied with the max_sort_length=30#
+#Check if sorting is applied with the max_sort_length=70#
#########################################################
-SET max_sort_length=30;
+SET max_sort_length=70;
SELECT c from t ORDER BY c, id;
--echo ** Results should be sorted **
@@ -173,34 +140,33 @@ SELECT c from t ORDER BY c, id;
#Check if sorting is applied with the max_sort_length=Default#
##############################################################
-
SET max_sort_length=default;
+SELECT @@max_sort_length;
SELECT c from t ORDER BY c, id;
--echo ** Results should be sorted **
-
--echo '#--------------------FN_DYNVARS_098_07-------------------------#'
###########################################
#Check if sorting is applied on BLOB type #
###########################################
--echo Testing type BLOB
# Setting global value of variable and inserting data in table
-SET @@global.max_sort_length = 30;
-SELECT @@global.max_sort_length;
-INSERT INTO t1 set c = repeat('x',29);
-INSERT INTO t1 set c = concat(repeat('x',28),'r','x');
-INSERT INTO t1 set c = concat(repeat('x',28),'s','y');
-INSERT INTO t1 set c = concat(repeat('x',28),'g','w');
-SELECT c from t1 ORDER BY c, id;
+SET @@max_sort_length = 70;
+SELECT @@max_sort_length;
+INSERT INTO t1 set c = repeat('x',69);
+INSERT INTO t1 set c = concat(repeat('x',68),'r','x');
+INSERT INTO t1 set c = concat(repeat('x',68),'s','y');
+INSERT INTO t1 set c = concat(repeat('x',68),'g','w');
+SELECT c from t1 ORDER BY c, id DESC;
--echo ** Results should be sorted **
# Setting session value of variable and inserting data in table
-SET @@session.max_sort_length = 20;
+SET @@session.max_sort_length = 64;
SELECT @@session.max_sort_length;
-INSERT INTO t1 set c = repeat('x',29);
-INSERT INTO t1 set c = concat(repeat('x',28),'r','x');
-INSERT INTO t1 set c = concat(repeat('x',28),'s','y');
-INSERT INTO t1 set c = concat(repeat('x',28),'g','w');
+INSERT INTO t1 set c = repeat('x',69);
+INSERT INTO t1 set c = concat(repeat('x',68),'r','x');
+INSERT INTO t1 set c = concat(repeat('x',68),'s','y');
+INSERT INTO t1 set c = concat(repeat('x',68),'g','w');
SELECT c from t1 ORDER BY c, id;
--echo ** Results should not be sorted **
@@ -210,26 +176,25 @@ SELECT c from t1 ORDER BY c, id;
###########################################
--echo Testing type CHAR
# Setting global value of variable and inserting data in table
-SET @@global.max_sort_length = 30;
+SET @@global.max_sort_length = 70;
SELECT @@global.max_sort_length;
-INSERT INTO t2 set c = repeat('x',29);
-INSERT INTO t2 set c = concat(repeat('x',28),'r','x');
-INSERT INTO t2 set c = concat(repeat('x',28),'s','y');
-INSERT INTO t2 set c = concat(repeat('x',28),'g','w');
+INSERT INTO t2 set c = repeat('x',69);
+INSERT INTO t2 set c = concat(repeat('x',68),'r','x');
+INSERT INTO t2 set c = concat(repeat('x',68),'s','y');
+INSERT INTO t2 set c = concat(repeat('x',68),'g','w');
SELECT c from t2 ORDER BY c, id;
--echo ** Results should not be sorted **
# Setting session value of variable and inserting data in table
-SET @@session.max_sort_length = 20;
+SET @@session.max_sort_length = 64;
SELECT @@session.max_sort_length;
-INSERT INTO t2 set c = repeat('x',29);
-INSERT INTO t2 set c = concat(repeat('x',28),'r','x');
-INSERT INTO t2 set c = concat(repeat('x',28),'s','y');
-INSERT INTO t2 set c = concat(repeat('x',28),'g','w');
+INSERT INTO t2 set c = repeat('x',69);
+INSERT INTO t2 set c = concat(repeat('x',68),'r','x');
+INSERT INTO t2 set c = concat(repeat('x',68),'s','y');
+INSERT INTO t2 set c = concat(repeat('x',68),'g','w');
SELECT c from t2 ORDER BY c, id;
--echo ** Results should not be sorted **
-
#
# Cleanup
#
@@ -239,12 +204,6 @@ connection default;
disconnect test_con1;
disconnect test_con2;
-SET @@SESSION.max_sort_length = @session_max_sort_length;
-
---disable_warnings
-DROP TABLE IF EXISTS t;
-DROP TABLE IF EXISTS t1;
-DROP TABLE IF EXISTS t2;
---enable_warnings
+DROP TABLE t, t1, t2;
SET @@global.max_sort_length= @start_value;
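
For context, a minimal standalone sketch of the behaviour the revised max_sort_length_func.test exercises; the table name sort_demo and the exact values are illustrative and not part of this patch. Sorting compares at most max_sort_length bytes of each value, so two TEXT values that differ only after that prefix compare as equal and fall back to the id tie-breaker:

CREATE TABLE sort_demo (id INT AUTO_INCREMENT PRIMARY KEY, c TEXT);
INSERT INTO sort_demo (c) VALUES
  (CONCAT(REPEAT('x',68),'r')),  -- differs from the next row only at byte 69
  (CONCAT(REPEAT('x',68),'g'));
SET SESSION max_sort_length = 64;  -- prefixes equal: rows keep insertion (id) order
SELECT c FROM sort_demo ORDER BY c, id;
SET SESSION max_sort_length = 70;  -- full values compared: the 'g' row sorts first
SELECT c FROM sort_demo ORDER BY c, id;
SET SESSION max_sort_length = DEFAULT;
DROP TABLE sort_demo;
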
diff --git a/mysql-test/suite/sys_vars/t/thread_stack_basic.test b/mysql-test/suite/sys_vars/t/thread_stack_basic.test
index 41015033fe9..39f120e0de1 100644
--- a/mysql-test/suite/sys_vars/t/thread_stack_basic.test
+++ b/mysql-test/suite/sys_vars/t/thread_stack_basic.test
@@ -1,6 +1,8 @@
#
# only global
#
+--source include/not_asan.inc
+--source include/not_ubsan.inc
--replace_result 392192 299008
select @@global.thread_stack;
--error ER_INCORRECT_GLOBAL_LOCAL_VAR
diff --git a/mysql-test/suite/sys_vars/t/wsrep_notify_cmd_basic.test b/mysql-test/suite/sys_vars/t/wsrep_notify_cmd_basic.test
deleted file mode 100644
index 6d1535ba148..00000000000
--- a/mysql-test/suite/sys_vars/t/wsrep_notify_cmd_basic.test
+++ /dev/null
@@ -1,43 +0,0 @@
---source include/have_wsrep.inc
-
---echo #
---echo # wsrep_notify_cmd
---echo #
-
-call mtr.add_suppression("WSREP: Failed to get provider options");
-
---echo # save the initial value
-SET @wsrep_notify_cmd_global_saved = @@global.wsrep_notify_cmd;
-
---echo # default
-SELECT @@global.wsrep_notify_cmd;
-
---echo
---echo # scope
---error ER_INCORRECT_GLOBAL_LOCAL_VAR
-SELECT @@session.wsrep_notify_cmd;
-SET @@global.wsrep_notify_cmd='notify_cmd';
-SELECT @@global.wsrep_notify_cmd;
-
---echo
---echo # valid values
-SET @@global.wsrep_notify_cmd='command';
-SELECT @@global.wsrep_notify_cmd;
-SET @@global.wsrep_notify_cmd='hyphenated-command';
-SELECT @@global.wsrep_notify_cmd;
-SET @@global.wsrep_notify_cmd=default;
-SELECT @@global.wsrep_notify_cmd;
-SET @@global.wsrep_notify_cmd=NULL;
-SELECT @@global.wsrep_notify_cmd;
-
---echo
---echo # invalid values
---error ER_WRONG_TYPE_FOR_VAR
-SET @@global.wsrep_notify_cmd=1;
-SELECT @@global.wsrep_notify_cmd;
-
---echo
---echo # restore the initial value
-SET @@global.wsrep_notify_cmd = @wsrep_notify_cmd_global_saved;
-
---echo # End of test
diff --git a/mysql-test/suite/sys_vars/t/wsrep_on_without_provider.test b/mysql-test/suite/sys_vars/t/wsrep_on_without_provider.test
new file mode 100644
index 00000000000..5bee3c9a356
--- /dev/null
+++ b/mysql-test/suite/sys_vars/t/wsrep_on_without_provider.test
@@ -0,0 +1,9 @@
+--source include/not_embedded.inc
+
+#
+# @@global.wsrep_on is not allowed if there
+# is no wsrep_provider
+#
+--error ER_WRONG_ARGUMENTS
+SET GLOBAL wsrep_on=ON;
+SELECT @@global.wsrep_on; \ No newline at end of file
diff --git a/mysql-test/suite/sys_vars/t/wsrep_provider_basic.test b/mysql-test/suite/sys_vars/t/wsrep_provider_basic.test
deleted file mode 100644
index 1190ab41bb0..00000000000
--- a/mysql-test/suite/sys_vars/t/wsrep_provider_basic.test
+++ /dev/null
@@ -1,39 +0,0 @@
---source include/have_wsrep.inc
-
---echo #
---echo # wsrep_provider
---echo #
-
---echo # save the initial value
-SET @wsrep_provider_global_saved = @@global.wsrep_provider;
-
---echo # default
-SELECT @@global.wsrep_provider;
-
---echo
---echo # scope
---error ER_INCORRECT_GLOBAL_LOCAL_VAR
-SELECT @@session.wsrep_provider;
-SELECT @@global.wsrep_provider;
-
---echo
---echo # valid values
-SET @@global.wsrep_provider=default;
-SELECT @@global.wsrep_provider;
-
---echo
---echo # invalid values
---error ER_WRONG_VALUE_FOR_VAR
-SET @@global.wsrep_provider='/invalid/libgalera_smm.so';
---error ER_WRONG_VALUE_FOR_VAR
-SET @@global.wsrep_provider=NULL;
-SELECT @@global.wsrep_provider;
---error ER_WRONG_TYPE_FOR_VAR
-SET @@global.wsrep_provider=1;
-SELECT @@global.wsrep_provider;
-
---echo
---echo # restore the initial value
-SET @@global.wsrep_provider = @wsrep_provider_global_saved;
-
---echo # End of test
diff --git a/mysql-test/suite/sys_vars/t/wsrep_provider_options_basic.test b/mysql-test/suite/sys_vars/t/wsrep_provider_options_basic.test
deleted file mode 100644
index 6eb3a94b6a4..00000000000
--- a/mysql-test/suite/sys_vars/t/wsrep_provider_options_basic.test
+++ /dev/null
@@ -1,41 +0,0 @@
---source include/have_wsrep.inc
-
---echo #
---echo # wsrep_provider_options
---echo #
-
-call mtr.add_suppression("WSREP: Failed to get provider options");
-
---echo # default
-SELECT @@global.wsrep_provider_options;
-
---echo
---echo # scope
---error ER_INCORRECT_GLOBAL_LOCAL_VAR
-SELECT @@session.wsrep_provider_options;
---error 0,ER_WRONG_ARGUMENTS
-SET @@global.wsrep_provider_options='option1';
-SELECT @@global.wsrep_provider_options;
-
---echo
---echo # valid values
---error ER_WRONG_ARGUMENTS
-SET @@global.wsrep_provider_options='name1=value1;name2=value2';
-SELECT @@global.wsrep_provider_options;
---error ER_WRONG_ARGUMENTS
-SET @@global.wsrep_provider_options='hyphenated-name:value';
-SELECT @@global.wsrep_provider_options;
---error ER_WRONG_ARGUMENTS
-SET @@global.wsrep_provider_options=default;
-SELECT @@global.wsrep_provider_options;
-
---echo
---echo # invalid values
---error ER_WRONG_TYPE_FOR_VAR
-SET @@global.wsrep_provider_options=1;
-SELECT @@global.wsrep_provider_options;
---error ER_WRONG_ARGUMENTS,ER_WRONG_ARGUMENTS
-SET @@global.wsrep_provider_options=NULL;
-SELECT @@global.wsrep_provider_options;
-
---echo # End of test
diff --git a/mysql-test/suite/unit/suite.pm b/mysql-test/suite/unit/suite.pm
index b7a1f9ae871..53f8923777a 100644
--- a/mysql-test/suite/unit/suite.pm
+++ b/mysql-test/suite/unit/suite.pm
@@ -20,7 +20,6 @@ sub start_test {
($path, $args) = ($cmd, , [ ])
}
-
my $oldpwd=getcwd();
chdir $::opt_vardir;
my $proc=My::SafeProcess->new
@@ -49,12 +48,12 @@ sub start_test {
my ($command, %tests, $prefix);
for (@ctest_list) {
chomp;
- if (/^\d+: Test command: +/) {
- $command= $';
+ if (/^\d+: Test command: +([^ \t]+.*)/) {
+ $command= $1;
$prefix= /libmariadb/ ? 'conc_' : '';
- } elsif (/^ +Test +#\d+: +/) {
- if ($command ne "NOT_AVAILABLE") {
- $tests{$prefix.$'}=$command;
+ } elsif (/^ +Test +#\d+: ([^ \t]+.*)/) {
+ if ($command ne "NOT_AVAILABLE" && $command ne "/bin/sh") {
+ $tests{$prefix.$1}=$command;
}
}
}
diff --git a/mysql-test/suite/vcol/r/vcol_syntax.result b/mysql-test/suite/vcol/r/vcol_syntax.result
index 16e30e57230..c8983f34c93 100644
--- a/mysql-test/suite/vcol/r/vcol_syntax.result
+++ b/mysql-test/suite/vcol/r/vcol_syntax.result
@@ -50,3 +50,41 @@ t1 CREATE TABLE "t1" (
)
drop table t1;
set session sql_mode=@OLD_SQL_MODE;
+#
+# MDEV-25091 CREATE TABLE: field references qualified by a wrong table name succeed
+#
+create table t2 (x int);
+create table t1 (x int, y int generated always as (t2.x));
+ERROR 42S22: Unknown column '`t2`.`x`' in 'GENERATED ALWAYS'
+create table t1 (x int, y int check (y > t2.x));
+ERROR 42S22: Unknown column '`t2`.`x`' in 'CHECK'
+create table t1 (x int, y int default t2.x);
+ERROR 42S22: Unknown column '`t2`.`x`' in 'DEFAULT'
+create table t1 (x int, check (t2.x > 0));
+ERROR 42S22: Unknown column '`t2`.`x`' in 'CHECK'
+create table t1 (x int);
+alter table t1 add column y int generated always as (t2.x);
+ERROR 42S22: Unknown column '`t2`.`x`' in 'GENERATED ALWAYS'
+alter table t1 add column y int check (z > t2.x);
+ERROR 42S22: Unknown column '`t2`.`x`' in 'CHECK'
+alter table t1 add column y int default t2.x;
+ERROR 42S22: Unknown column '`t2`.`x`' in 'DEFAULT'
+alter table t1 add constraint check (t2.x > 0);
+ERROR 42S22: Unknown column '`t2`.`x`' in 'CHECK'
+create or replace table t1 (x int, y int generated always as (t1.x));
+create or replace table t1 (x int, y int check (y > t1.x));
+create or replace table t1 (x int, y int default t1.x);
+create or replace table t1 (x int, check (t1.x > 0));
+create or replace table t1 (x int, y int generated always as (test.t1.x));
+create or replace table t1 (x int, y int check (y > test.t1.x));
+create or replace table t1 (x int, y int default test.t1.x);
+create or replace table t1 (x int, check (test.t1.x > 0));
+drop tables t1, t2;
+create table t1 (x int, y int generated always as (test2.t1.x));
+ERROR 42S22: Unknown column '`test2`.`t1`.`x`' in 'GENERATED ALWAYS'
+create table t1 (x int, y int check (y > test2.t1.x));
+ERROR 42S22: Unknown column '`test2`.`t1`.`x`' in 'CHECK'
+create table t1 (x int, y int default test2.t1.x);
+ERROR 42S22: Unknown column '`test2`.`t1`.`x`' in 'DEFAULT'
+create table t1 (x int, check (test2.t1.x > 0));
+ERROR 42S22: Unknown column '`test2`.`t1`.`x`' in 'CHECK'
diff --git a/mysql-test/suite/vcol/t/vcol_syntax.test b/mysql-test/suite/vcol/t/vcol_syntax.test
index 6dc3cf43317..f425b52ab79 100644
--- a/mysql-test/suite/vcol/t/vcol_syntax.test
+++ b/mysql-test/suite/vcol/t/vcol_syntax.test
@@ -28,3 +28,47 @@ show create table t1;
drop table t1;
set session sql_mode=@OLD_SQL_MODE;
+--echo #
+--echo # MDEV-25091 CREATE TABLE: field references qualified by a wrong table name succeed
+--echo #
+create table t2 (x int);
+
+--error ER_BAD_FIELD_ERROR
+create table t1 (x int, y int generated always as (t2.x));
+--error ER_BAD_FIELD_ERROR
+create table t1 (x int, y int check (y > t2.x));
+--error ER_BAD_FIELD_ERROR
+create table t1 (x int, y int default t2.x);
+--error ER_BAD_FIELD_ERROR
+create table t1 (x int, check (t2.x > 0));
+
+create table t1 (x int);
+--error ER_BAD_FIELD_ERROR
+alter table t1 add column y int generated always as (t2.x);
+--error ER_BAD_FIELD_ERROR
+alter table t1 add column y int check (z > t2.x);
+--error ER_BAD_FIELD_ERROR
+alter table t1 add column y int default t2.x;
+--error ER_BAD_FIELD_ERROR
+alter table t1 add constraint check (t2.x > 0);
+
+create or replace table t1 (x int, y int generated always as (t1.x));
+create or replace table t1 (x int, y int check (y > t1.x));
+create or replace table t1 (x int, y int default t1.x);
+create or replace table t1 (x int, check (t1.x > 0));
+
+create or replace table t1 (x int, y int generated always as (test.t1.x));
+create or replace table t1 (x int, y int check (y > test.t1.x));
+create or replace table t1 (x int, y int default test.t1.x);
+create or replace table t1 (x int, check (test.t1.x > 0));
+
+drop tables t1, t2;
+
+--error ER_BAD_FIELD_ERROR
+create table t1 (x int, y int generated always as (test2.t1.x));
+--error ER_BAD_FIELD_ERROR
+create table t1 (x int, y int check (y > test2.t1.x));
+--error ER_BAD_FIELD_ERROR
+create table t1 (x int, y int default test2.t1.x);
+--error ER_BAD_FIELD_ERROR
+create table t1 (x int, check (test2.t1.x > 0));
diff --git a/mysql-test/suite/versioning/common.inc b/mysql-test/suite/versioning/common.inc
index efb081a02e4..25adf15dd50 100644
--- a/mysql-test/suite/versioning/common.inc
+++ b/mysql-test/suite/versioning/common.inc
@@ -70,6 +70,11 @@ returns int
deterministic
return sys_trx_end = $sys_datatype_max;
+eval create or replace function current_row_ts(sys_trx_end timestamp(6))
+returns int
+deterministic
+ return convert_tz(sys_trx_end, '+00:00', @@time_zone) = TIMESTAMP'2038-01-19 03:14:07.999999';
+
delimiter ~~;
eval create or replace function check_row(row_start $sys_datatype_expl, row_end $sys_datatype_expl)
returns varchar(255)
@@ -86,4 +91,20 @@ begin
end~~
delimiter ;~~
+delimiter ~~;
+eval create or replace function check_row_ts(row_start timestamp(6), row_end timestamp(6))
+returns varchar(255)
+deterministic
+begin
+ if row_end < row_start then
+ return "ERROR: row_end < row_start";
+ elseif row_end = row_start then
+ return "ERROR: row_end == row_start";
+ elseif current_row_ts(row_end) then
+ return "CURRENT ROW";
+ end if;
+ return "HISTORICAL ROW";
+end~~
+delimiter ;~~
+
--enable_query_log
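
A quick illustration of how the new check_row_ts helper is meant to be called; the table below is hypothetical, while the real callers are the versioning/delete and versioning/foreign tests later in this patch. The helper classifies a timestamp(6) row_start/row_end pair as a current or historical row, independently of the $sys_datatype handled by the generic check_row:

create table demo (a int) with system versioning engine innodb;
insert into demo values (1);
delete from demo;
-- the deleted row should now be reported as "HISTORICAL ROW"
select a, check_row_ts(row_start, row_end) from demo for system_time all;
drop table demo;
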
diff --git a/mysql-test/suite/versioning/common_finish.inc b/mysql-test/suite/versioning/common_finish.inc
index 61641c6c5ce..3c4e7b66ff3 100644
--- a/mysql-test/suite/versioning/common_finish.inc
+++ b/mysql-test/suite/versioning/common_finish.inc
@@ -4,5 +4,7 @@ drop procedure if exists verify_trt;
drop procedure if exists verify_trt_dummy;
drop function if exists current_row;
drop function if exists check_row;
+drop function if exists current_row_ts;
+drop function if exists check_row_ts;
--enable_warnings
--enable_query_log
diff --git a/mysql-test/suite/versioning/r/alter.result b/mysql-test/suite/versioning/r/alter.result
index 33c1d499088..b2dbbba7027 100644
--- a/mysql-test/suite/versioning/r/alter.result
+++ b/mysql-test/suite/versioning/r/alter.result
@@ -80,7 +80,7 @@ t CREATE TABLE `t` (
`a` int(11) DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=latin1
alter table t add column trx_start timestamp(6) as row start;
-ERROR HY000: Duplicate ROW START column `trx_start`
+ERROR HY000: Table `t` is not system-versioned
alter table t add system versioning;
show create table t;
Table Create Table
@@ -696,3 +696,69 @@ delete from t1;
set statement system_versioning_alter_history=keep for
alter table t1 drop system versioning, modify column a tinyint;
drop table t1;
+#
+# MDEV-24690 Dropping primary key column from versioned table always fails with 1072
+#
+create table t1 (a int, b int primary key) with system versioning;
+alter table t1 drop column b;
+create or replace table t1 (
+a int, b int primary key,
+row_start timestamp(6) as row start,
+row_end timestamp(6) as row end,
+period for system_time(row_start, row_end)
+) with system versioning;
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` int(11) NOT NULL,
+ `row_start` timestamp(6) GENERATED ALWAYS AS ROW START,
+ `row_end` timestamp(6) GENERATED ALWAYS AS ROW END,
+ PRIMARY KEY (`b`,`row_end`),
+ PERIOD FOR SYSTEM_TIME (`row_start`, `row_end`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1 WITH SYSTEM VERSIONING
+alter table t1 drop column b;
+ERROR 42000: Key column 'b' doesn't exist in table
+create or replace table t1 (
+a int, b int primary key,
+row_start timestamp(6) as row start invisible,
+row_end timestamp(6) as row end invisible,
+period for system_time(row_start, row_end)
+) with system versioning;
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` int(11) NOT NULL,
+ `row_start` timestamp(6) GENERATED ALWAYS AS ROW START INVISIBLE,
+ `row_end` timestamp(6) GENERATED ALWAYS AS ROW END INVISIBLE,
+ PRIMARY KEY (`b`,`row_end`),
+ PERIOD FOR SYSTEM_TIME (`row_start`, `row_end`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1 WITH SYSTEM VERSIONING
+alter table t1 drop column b;
+ERROR 42000: Key column 'b' doesn't exist in table
+drop table t1;
+#
+# MDEV-25172 Wrong error message for ADD COLUMN .. AS ROW START
+#
+create or replace table t1 (x int);
+alter table t1 add column y timestamp(6) as row start;
+ERROR HY000: Table `t1` is not system-versioned
+drop table t1;
+#
+# MDEV-25327 Unexpected ER_DUP_ENTRY upon dropping PK column from system-versioned table
+#
+create table t1 (pk int, a int, primary key (pk), key (a))
+with system versioning;
+insert into t1 values (1, 1), (2, 2);
+delete from t1;
+set system_versioning_alter_history= keep;
+alter table t1 drop pk;
+drop table t1;
+create table t1 (pk int, a int, primary key (pk), key (a))
+with system versioning;
+insert into t1 values (1, 2), (2, 8), (3, 4), (4, 4), (5, 0);
+delete from t1;
+set system_versioning_alter_history= keep;
+alter ignore table t1 drop pk;
+drop table t1;
diff --git a/mysql-test/suite/versioning/r/auto_increment.result b/mysql-test/suite/versioning/r/autoinc.result
index 8ff1bed8fe3..e785c5d300e 100644
--- a/mysql-test/suite/versioning/r/auto_increment.result
+++ b/mysql-test/suite/versioning/r/autoinc.result
@@ -63,3 +63,13 @@ A x y x y
1 7 17 7 17
drop table t1;
drop table t2;
+#
+# MDEV-22562 Assertion `next_insert_id == 0' upon UPDATE on system-versioned table
+#
+create table t1 (pk integer auto_increment primary key) engine=myisam with system versioning;
+insert delayed into t1 (pk) values (1);
+lock tables t1 write;
+update t1 set pk= 0;
+update t1 set pk= 0;
+unlock tables;
+drop table t1;
diff --git a/mysql-test/suite/versioning/r/delete.result b/mysql-test/suite/versioning/r/delete.result
index 5aa239b9cb8..0f9e2c22130 100644
--- a/mysql-test/suite/versioning/r/delete.result
+++ b/mysql-test/suite/versioning/r/delete.result
@@ -130,3 +130,22 @@ ERROR 42S02: Table 'test.xx' doesn't exist
drop procedure pr;
drop trigger tr;
drop table t1;
+#
+# MDEV-21138 Assertion `col->ord_part' or `f.col->ord_part' failed in row_build_index_entry_low
+#
+create table t1 (
+f1 int, f2 text, f3 int, fulltext (f2), key(f1), key(f3),
+foreign key r (f3) references t1 (f1) on delete set null)
+with system versioning engine innodb;
+insert into t1 values (1, repeat('a', 8193), 1), (1, repeat('b', 8193), 1);
+select f1, f3, check_row_ts(row_start, row_end) from t1;
+f1 f3 check_row_ts(row_start, row_end)
+1 1 CURRENT ROW
+1 1 CURRENT ROW
+delete from t1;
+select f1, f3, check_row_ts(row_start, row_end) from t1 for system_time all;
+f1 f3 check_row_ts(row_start, row_end)
+1 1 HISTORICAL ROW
+1 NULL ERROR: row_end == row_start
+1 1 HISTORICAL ROW
+drop table t1;
diff --git a/mysql-test/suite/versioning/r/foreign.result b/mysql-test/suite/versioning/r/foreign.result
index 1b9925b1e62..288909bbc37 100644
--- a/mysql-test/suite/versioning/r/foreign.result
+++ b/mysql-test/suite/versioning/r/foreign.result
@@ -400,6 +400,8 @@ Warning 1265 Data truncated for column 'f12' at row 7
SET timestamp = 9;
REPLACE INTO t2 SELECT * FROM t2;
DROP TABLE t1, t2;
+set timestamp= default;
+set time_zone='+00:00';
#
# MDEV-16210 FK constraints on versioned tables use historical rows, which may cause constraint violation
#
@@ -429,3 +431,17 @@ insert into t2 values (1), (1);
# DELETE from foreign table is allowed
delete from t2;
drop tables t2, t1;
+#
+# MDEV-23644 Assertion on evaluating foreign referential action for self-reference in system versioned table
+#
+create table t1 (pk int primary key, f1 int,f2 int, f3 text,
+key(f1), fulltext(f3), key(f3(10)),
+foreign key (f2) references t1 (f1) on delete set null
+) engine=innodb with system versioning;
+insert into t1 values (1, 8, 8, 'SHORT'), (2, 8, 8, repeat('LONG', 8071));
+delete from t1;
+select pk, f1, f2, left(f3, 4), check_row_ts(row_start, row_end) from t1 for system_time all order by pk;
+pk f1 f2 left(f3, 4) check_row_ts(row_start, row_end)
+1 8 8 SHOR HISTORICAL ROW
+2 8 8 LONG HISTORICAL ROW
+drop table t1;
diff --git a/mysql-test/suite/versioning/r/partition.result b/mysql-test/suite/versioning/r/partition.result
index 9e25dc6910b..9eeec045ef7 100644
--- a/mysql-test/suite/versioning/r/partition.result
+++ b/mysql-test/suite/versioning/r/partition.result
@@ -538,6 +538,7 @@ set timestamp=1523466002.799571;
insert into t1 values (11),(12);
set timestamp=1523466004.169435;
delete from t1 where pk in (11, 12);
+set timestamp= default;
#
# MDEV-18136 Server crashes in Item_func_dyncol_create::prepare_arguments
#
@@ -690,6 +691,48 @@ create table t1 (a int) with system versioning partition by system_time
(partition p1 history, partition pn current);
alter table t1 add partition (partition p2);
ERROR HY000: Wrong partitioning type, expected type: `SYSTEM_TIME`
+# MDEV-17891 Assertion failures in select_insert::abort_result_set and
+# mysql_load upon attempt to replace into a full table
+set @@max_heap_table_size= 1024*1024;
+create or replace table t1 (
+pk integer auto_increment,
+primary key (pk),
+f varchar(45000)
+) with system versioning engine=memory
+partition by system_time interval 1 year (partition p1 history,
+partition pn current);
+# fill the table until full
+insert into t1 () values (),(),(),(),(),(),(),(),(),(),(),(),(),(),(),();
+insert into t1 (f) select f from t1;
+ERROR HY000: The table 't1' is full
+# leave space for exactly one record in current partition
+delete from t1 where pk = 1;
+# copy all data into history partition
+replace into t1 select * from t1;
+replace into t1 select * from t1;
+ERROR HY000: The table 't1' is full
+create or replace table t1 (
+pk integer auto_increment,
+primary key (pk),
+f varchar(45000)
+) with system versioning engine=memory
+partition by system_time interval 1 year (partition p1 history,
+partition pn current);
+insert into t1 () values (),(),(),(),(),(),(),(),(),(),(),(),(),(),(),();
+select * into outfile 'load.data' from t1;
+load data infile 'load.data' replace into table t1;
+load data infile 'load.data' replace into table t1;
+ERROR HY000: The table 't1' is full
+load data infile 'load.data' replace into table t1;
+ERROR HY000: The table 't1' is full
+set @@max_heap_table_size= 1048576;
+drop table t1;
+#
+# MDEV-22178 Assertion `info->alias.str' failed in partition_info::check_partition_info instead of ER_VERS_WRONG_PARTS
+#
+create or replace table t1 (a int) with system versioning;
+alter table t1 partition by system_time (partition pn current);
+ERROR HY000: Wrong partitions for `t1`: must have at least one HISTORY and exactly one last CURRENT
drop table t1;
# End of 10.3 tests
#
diff --git a/mysql-test/suite/versioning/r/replace.result b/mysql-test/suite/versioning/r/replace.result
index bda61f118b0..57a992cce49 100644
--- a/mysql-test/suite/versioning/r/replace.result
+++ b/mysql-test/suite/versioning/r/replace.result
@@ -48,3 +48,16 @@ INSERT INTO t1 () VALUES (),(),(),(),(),();
UPDATE IGNORE t1 SET f = 1;
REPLACE t1 SELECT * FROM t1;
DROP TABLE t1;
+# MDEV-22540 ER_DUP_ENTRY upon REPLACE or Assertion failed
+set timestamp=1589245268.41934;
+create table t1 (a int primary key) with system versioning;
+insert into t1 values (1),(2);
+connect con1,localhost,root,,test;
+set timestamp=1589245268.52093;
+replace into t1 values (1),(2);
+connection default;
+replace into t1 values (1),(2);
+connection con1;
+replace into t1 values (1),(2);
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+drop table t1;
diff --git a/mysql-test/suite/versioning/r/trx_id.result b/mysql-test/suite/versioning/r/trx_id.result
index 8475fc88258..03ef20a6a9e 100644
--- a/mysql-test/suite/versioning/r/trx_id.result
+++ b/mysql-test/suite/versioning/r/trx_id.result
@@ -5,7 +5,15 @@ sys_trx_start bigint(20) unsigned as row start invisible,
sys_trx_end bigint(20) unsigned as row end invisible,
period for system_time (sys_trx_start, sys_trx_end)
) with system versioning;
+# No history inside the transaction
+start transaction;
insert into t1 (x) values (1);
+update t1 set x= x + 1;
+update t1 set x= x + 1;
+commit;
+select *, sys_trx_start > 1, sys_trx_end from t1 for system_time all;
+x sys_trx_start > 1 sys_trx_end
+3 1 18446744073709551615
# ALTER ADD SYSTEM VERSIONING should write to mysql.transaction_registry
set @@system_versioning_alter_history=keep;
create or replace table t1 (x int);
diff --git a/mysql-test/suite/versioning/r/update.result b/mysql-test/suite/versioning/r/update.result
index cd26c341113..da893432749 100644
--- a/mysql-test/suite/versioning/r/update.result
+++ b/mysql-test/suite/versioning/r/update.result
@@ -241,6 +241,26 @@ B2 salary
1 2500
drop table t1;
drop table t2;
+# Ensure FTS retains correct history
+create table t1 (
+x int, y text, fulltext (y),
+row_start SYS_DATATYPE as row start invisible,
+row_end SYS_DATATYPE as row end invisible,
+period for system_time (row_start, row_end))
+with system versioning engine innodb;
+insert into t1 values (1, repeat('LONG', 2048));
+update t1 set x= x + 1;
+select x, left(y, 4), length(y), check_row(row_start, row_end) from t1 for system_time all order by x, y;
+x left(y, 4) length(y) check_row(row_start, row_end)
+1 LONG 8192 HISTORICAL ROW
+2 LONG 8192 CURRENT ROW
+update t1 set y= 'SHORT';
+select x, left(y, 4), length(y), check_row(row_start, row_end) from t1 for system_time all order by x, y;
+x left(y, 4) length(y) check_row(row_start, row_end)
+1 LONG 8192 HISTORICAL ROW
+2 LONG 8192 HISTORICAL ROW
+2 SHOR 5 CURRENT ROW
+drop tables t1;
### Issue tempesta-tech/mariadb#365, bug 7 (duplicate of historical row)
create or replace table t1 (a int primary key, b int)
with system versioning engine myisam;
@@ -350,3 +370,48 @@ insert into t1 (a) values (1), (2);
update ignore t1 set a= 3;
delete history from t1;
drop table t1;
+#
+# MDEV-23446 UPDATE does not insert history row if the row is not changed
+#
+create table t1 (
+a int,
+row_start SYS_DATATYPE as row start invisible,
+row_end SYS_DATATYPE as row end invisible,
+period for system_time (row_start, row_end)) with system versioning;
+insert into t1 values (1);
+update t1 set a= 1;
+select *, check_row(row_start, row_end) from t1 for system_time all order by row_end;
+a check_row(row_start, row_end)
+1 HISTORICAL ROW
+1 CURRENT ROW
+# multi-update
+create or replace table t2 like t1;
+create or replace table t3 like t1;
+insert into t2 values (1);
+insert into t3 values (1);
+update t2, t3 set t2.a= 1, t3.a= 1 where t2.a = t3.a;
+select *, check_row(row_start, row_end) from t2 for system_time all order by row_end;
+a check_row(row_start, row_end)
+1 HISTORICAL ROW
+1 CURRENT ROW
+select *, check_row(row_start, row_end) from t2 for system_time all order by row_end;
+a check_row(row_start, row_end)
+1 HISTORICAL ROW
+1 CURRENT ROW
+drop tables t1, t2, t3;
+#
+# MDEV-24522 Assertion `inited==NONE' fails upon UPDATE on versioned table with unique blob
+
+create table t1 (a int, b int, c text, unique(c), key (b)) engine=myisam with system versioning;
+insert into t1 values (1, 1, 'foo'), (2, 11, 'bar');
+update t1 set a = 3 where b <= 9;
+update t1 set a = 3 where b <= 10;
+drop table t1;
+create table t1 (a int, b int, c text, unique(c), key (b)) engine=myisam with system versioning;
+create table t2 (a int, b int, c text, unique(c), key (b)) engine=myisam with system versioning;
+insert into t1 values (1, 1, 'foo'), (2, 11, 'bar');
+insert into t2 values (1, 1, 'foo'), (2, 11, 'bar');
+update t1 set a = 3 where b <= 9;
+update t2 set a = 3 where b <= 9;
+update t1, t2 set t1.a = 3, t2.a = 3 where t1.b <= 10 and t2.b <= 10 and t1.b = t2.b;
+drop tables t1, t2;
diff --git a/mysql-test/suite/versioning/t/alter.test b/mysql-test/suite/versioning/t/alter.test
index 83c3e911fad..16f391b1454 100644
--- a/mysql-test/suite/versioning/t/alter.test
+++ b/mysql-test/suite/versioning/t/alter.test
@@ -68,7 +68,7 @@ select row_start from t;
alter table t drop system versioning;
show create table t;
---error ER_VERS_DUPLICATE_ROW_START_END
+--error ER_VERS_NOT_VERSIONED
alter table t add column trx_start timestamp(6) as row start;
alter table t add system versioning;
@@ -593,3 +593,63 @@ alter table t1 drop system versioning, modify column a tinyint;
# cleanup
drop table t1;
+
+--echo #
+--echo # MDEV-24690 Dropping primary key column from versioned table always fails with 1072
+--echo #
+create table t1 (a int, b int primary key) with system versioning;
+alter table t1 drop column b;
+
+create or replace table t1 (
+ a int, b int primary key,
+ row_start timestamp(6) as row start,
+ row_end timestamp(6) as row end,
+ period for system_time(row_start, row_end)
+) with system versioning;
+show create table t1;
+--error ER_KEY_COLUMN_DOES_NOT_EXITS
+alter table t1 drop column b;
+
+create or replace table t1 (
+a int, b int primary key,
+ row_start timestamp(6) as row start invisible,
+ row_end timestamp(6) as row end invisible,
+ period for system_time(row_start, row_end)
+) with system versioning;
+show create table t1;
+--error ER_KEY_COLUMN_DOES_NOT_EXITS
+alter table t1 drop column b;
+
+# cleanup
+drop table t1;
+
+--echo #
+--echo # MDEV-25172 Wrong error message for ADD COLUMN .. AS ROW START
+--echo #
+create or replace table t1 (x int);
+--error ER_VERS_NOT_VERSIONED
+alter table t1 add column y timestamp(6) as row start;
+# cleanup
+drop table t1;
+
+
+--echo #
+--echo # MDEV-25327 Unexpected ER_DUP_ENTRY upon dropping PK column from system-versioned table
+--echo #
+create table t1 (pk int, a int, primary key (pk), key (a))
+with system versioning;
+insert into t1 values (1, 1), (2, 2);
+delete from t1;
+set system_versioning_alter_history= keep;
+alter table t1 drop pk;
+# cleanup
+drop table t1;
+
+create table t1 (pk int, a int, primary key (pk), key (a))
+with system versioning;
+insert into t1 values (1, 2), (2, 8), (3, 4), (4, 4), (5, 0);
+delete from t1;
+set system_versioning_alter_history= keep;
+alter ignore table t1 drop pk;
+# cleanup
+drop table t1;
diff --git a/mysql-test/suite/versioning/t/auto_increment.test b/mysql-test/suite/versioning/t/autoinc.test
index 804c0424179..7c87c17301a 100644
--- a/mysql-test/suite/versioning/t/auto_increment.test
+++ b/mysql-test/suite/versioning/t/autoinc.test
@@ -47,4 +47,17 @@ select t1.x = t2.x and t1.y = t2.y as A, t1.x, t1.y, t2.x, t2.y from t1 inner jo
drop table t1;
drop table t2;
+--echo #
+--echo # MDEV-22562 Assertion `next_insert_id == 0' upon UPDATE on system-versioned table
+--echo #
+create table t1 (pk integer auto_increment primary key) engine=myisam with system versioning;
+insert delayed into t1 (pk) values (1);
+lock tables t1 write;
+update t1 set pk= 0;
+update t1 set pk= 0;
+unlock tables;
+
+# cleanup
+drop table t1;
+
-- source suite/versioning/common_finish.inc
diff --git a/mysql-test/suite/versioning/t/delete.test b/mysql-test/suite/versioning/t/delete.test
index 492463f9395..a5a0497fef2 100644
--- a/mysql-test/suite/versioning/t/delete.test
+++ b/mysql-test/suite/versioning/t/delete.test
@@ -94,4 +94,19 @@ drop procedure pr;
drop trigger tr;
drop table t1;
+--echo #
+--echo # MDEV-21138 Assertion `col->ord_part' or `f.col->ord_part' failed in row_build_index_entry_low
+--echo #
+create table t1 (
+ f1 int, f2 text, f3 int, fulltext (f2), key(f1), key(f3),
+ foreign key r (f3) references t1 (f1) on delete set null)
+with system versioning engine innodb;
+insert into t1 values (1, repeat('a', 8193), 1), (1, repeat('b', 8193), 1);
+select f1, f3, check_row_ts(row_start, row_end) from t1;
+delete from t1;
+select f1, f3, check_row_ts(row_start, row_end) from t1 for system_time all;
+
+# cleanup
+drop table t1;
+
--source suite/versioning/common_finish.inc
diff --git a/mysql-test/suite/versioning/t/foreign.test b/mysql-test/suite/versioning/t/foreign.test
index 453ddd34034..4143cd59048 100644
--- a/mysql-test/suite/versioning/t/foreign.test
+++ b/mysql-test/suite/versioning/t/foreign.test
@@ -421,6 +421,8 @@ REPLACE INTO t2 SELECT * FROM t2;
# Cleanup
DROP TABLE t1, t2;
+set timestamp= default;
+set time_zone='+00:00';
--let $datadir= `select @@datadir`
--remove_file $datadir/test/t1.data
--remove_file $datadir/test/t1.data.2
@@ -458,4 +460,20 @@ insert into t2 values (1), (1);
delete from t2;
drop tables t2, t1;
+--echo #
+--echo # MDEV-23644 Assertion on evaluating foreign referential action for self-reference in system versioned table
+--echo #
+create table t1 (pk int primary key, f1 int,f2 int, f3 text,
+ key(f1), fulltext(f3), key(f3(10)),
+ foreign key (f2) references t1 (f1) on delete set null
+) engine=innodb with system versioning;
+
+insert into t1 values (1, 8, 8, 'SHORT'), (2, 8, 8, repeat('LONG', 8071));
+
+delete from t1;
+select pk, f1, f2, left(f3, 4), check_row_ts(row_start, row_end) from t1 for system_time all order by pk;
+
+# cleanup
+drop table t1;
+
--source suite/versioning/common_finish.inc
diff --git a/mysql-test/suite/versioning/t/partition.test b/mysql-test/suite/versioning/t/partition.test
index 03d396b5e6c..f50e6c07b64 100644
--- a/mysql-test/suite/versioning/t/partition.test
+++ b/mysql-test/suite/versioning/t/partition.test
@@ -473,6 +473,7 @@ set timestamp=1523466002.799571;
insert into t1 values (11),(12);
set timestamp=1523466004.169435;
delete from t1 where pk in (11, 12);
+set timestamp= default;
--echo #
--echo # MDEV-18136 Server crashes in Item_func_dyncol_create::prepare_arguments
@@ -640,6 +641,59 @@ create table t1 (a int) with system versioning partition by system_time
--error ER_PARTITION_WRONG_TYPE
alter table t1 add partition (partition p2);
+--echo # MDEV-17891 Assertion failures in select_insert::abort_result_set and
+--echo # mysql_load upon attempt to replace into a full table
+
+--let $max_heap_table_size_orig= `select @@max_heap_table_size;`
+set @@max_heap_table_size= 1024*1024;
+create or replace table t1 (
+ pk integer auto_increment,
+ primary key (pk),
+ f varchar(45000)
+) with system versioning engine=memory
+ partition by system_time interval 1 year (partition p1 history,
+ partition pn current);
+
+--echo # fill the table until full
+insert into t1 () values (),(),(),(),(),(),(),(),(),(),(),(),(),(),(),();
+--error ER_RECORD_FILE_FULL
+insert into t1 (f) select f from t1;
+--echo # leave space for exactly one record in current partition
+delete from t1 where pk = 1;
+--echo # copy all data into history partition
+replace into t1 select * from t1;
+--error ER_RECORD_FILE_FULL
+replace into t1 select * from t1;
+
+create or replace table t1 (
+ pk integer auto_increment,
+ primary key (pk),
+ f varchar(45000)
+) with system versioning engine=memory
+ partition by system_time interval 1 year (partition p1 history,
+ partition pn current);
+
+insert into t1 () values (),(),(),(),(),(),(),(),(),(),(),(),(),(),(),();
+
+select * into outfile 'load.data' from t1;
+load data infile 'load.data' replace into table t1;
+--error ER_RECORD_FILE_FULL
+load data infile 'load.data' replace into table t1;
+--error ER_RECORD_FILE_FULL
+load data infile 'load.data' replace into table t1;
+
+# Cleanup
+--let $datadir= `select @@datadir`
+--remove_file $datadir/test/load.data
+eval set @@max_heap_table_size= $max_heap_table_size_orig;
+drop table t1;
+
+--echo #
+--echo # MDEV-22178 Assertion `info->alias.str' failed in partition_info::check_partition_info instead of ER_VERS_WRONG_PARTS
+--echo #
+create or replace table t1 (a int) with system versioning;
+--error ER_VERS_WRONG_PARTS
+alter table t1 partition by system_time (partition pn current);
# Cleanup
drop table t1;
diff --git a/mysql-test/suite/versioning/t/replace.test b/mysql-test/suite/versioning/t/replace.test
index 392c0ffcf35..83489f4a4b9 100644
--- a/mysql-test/suite/versioning/t/replace.test
+++ b/mysql-test/suite/versioning/t/replace.test
@@ -59,4 +59,22 @@ UPDATE IGNORE t1 SET f = 1;
REPLACE t1 SELECT * FROM t1;
DROP TABLE t1;
+--echo # MDEV-22540 ER_DUP_ENTRY upon REPLACE or Assertion failed
+set timestamp=1589245268.41934;
+create table t1 (a int primary key) with system versioning;
+insert into t1 values (1),(2);
+
+--connect (con1,localhost,root,,test)
+set timestamp=1589245268.52093;
+replace into t1 values (1),(2);
+
+--connection default
+replace into t1 values (1),(2);
+
+--connection con1
+--error ER_DUP_ENTRY
+replace into t1 values (1),(2);
+
+drop table t1;
+
--source suite/versioning/common_finish.inc
diff --git a/mysql-test/suite/versioning/t/trx_id.test b/mysql-test/suite/versioning/t/trx_id.test
index 38724a47fd1..7dfc8acb080 100644
--- a/mysql-test/suite/versioning/t/trx_id.test
+++ b/mysql-test/suite/versioning/t/trx_id.test
@@ -14,7 +14,13 @@ create or replace table t1 (
period for system_time (sys_trx_start, sys_trx_end)
) with system versioning;
+--echo # No history inside the transaction
+start transaction;
insert into t1 (x) values (1);
+update t1 set x= x + 1;
+update t1 set x= x + 1;
+commit;
+select *, sys_trx_start > 1, sys_trx_end from t1 for system_time all;
--echo # ALTER ADD SYSTEM VERSIONING should write to mysql.transaction_registry
set @@system_versioning_alter_history=keep;
diff --git a/mysql-test/suite/versioning/t/update.test b/mysql-test/suite/versioning/t/update.test
index 06f81ea9064..47a56a71bd3 100644
--- a/mysql-test/suite/versioning/t/update.test
+++ b/mysql-test/suite/versioning/t/update.test
@@ -147,6 +147,21 @@ select @tmp2 = sys_trx_start as B2, salary from t2;
drop table t1;
drop table t2;
+--echo # Ensure FTS retains correct history
+replace_result $sys_datatype_expl SYS_DATATYPE;
+eval create table t1 (
+ x int, y text, fulltext (y),
+ row_start $sys_datatype_expl as row start invisible,
+ row_end $sys_datatype_expl as row end invisible,
+ period for system_time (row_start, row_end))
+with system versioning engine innodb;
+insert into t1 values (1, repeat('LONG', 2048));
+update t1 set x= x + 1;
+select x, left(y, 4), length(y), check_row(row_start, row_end) from t1 for system_time all order by x, y;
+update t1 set y= 'SHORT';
+select x, left(y, 4), length(y), check_row(row_start, row_end) from t1 for system_time all order by x, y;
+drop tables t1;
+
--echo ### Issue tempesta-tech/mariadb#365, bug 7 (duplicate of historical row)
create or replace table t1 (a int primary key, b int)
with system versioning engine myisam;
@@ -286,4 +301,53 @@ delete history from t1;
# cleanup
drop table t1;
+--echo #
+--echo # MDEV-23446 UPDATE does not insert history row if the row is not changed
+--echo #
+replace_result $sys_datatype_expl SYS_DATATYPE;
+eval create table t1 (
+ a int,
+ row_start $sys_datatype_expl as row start invisible,
+ row_end $sys_datatype_expl as row end invisible,
+ period for system_time (row_start, row_end)) with system versioning;
+insert into t1 values (1);
+update t1 set a= 1;
+select *, check_row(row_start, row_end) from t1 for system_time all order by row_end;
+
+--echo # multi-update
+create or replace table t2 like t1;
+create or replace table t3 like t1;
+insert into t2 values (1);
+insert into t3 values (1);
+update t2, t3 set t2.a= 1, t3.a= 1 where t2.a = t3.a;
+select *, check_row(row_start, row_end) from t2 for system_time all order by row_end;
+select *, check_row(row_start, row_end) from t2 for system_time all order by row_end;
+
+# cleanup
+drop tables t1, t2, t3;
+
+--echo #
+--echo # MDEV-24522 Assertion `inited==NONE' fails upon UPDATE on versioned table with unique blob
+--echo
+create table t1 (a int, b int, c text, unique(c), key (b)) engine=myisam with system versioning;
+insert into t1 values (1, 1, 'foo'), (2, 11, 'bar');
+
+update t1 set a = 3 where b <= 9;
+update t1 set a = 3 where b <= 10;
+
+# cleanup
+drop table t1;
+
+create table t1 (a int, b int, c text, unique(c), key (b)) engine=myisam with system versioning;
+create table t2 (a int, b int, c text, unique(c), key (b)) engine=myisam with system versioning;
+insert into t1 values (1, 1, 'foo'), (2, 11, 'bar');
+insert into t2 values (1, 1, 'foo'), (2, 11, 'bar');
+
+update t1 set a = 3 where b <= 9;
+update t2 set a = 3 where b <= 9;
+update t1, t2 set t1.a = 3, t2.a = 3 where t1.b <= 10 and t2.b <= 10 and t1.b = t2.b;
+
+# cleanup
+drop tables t1, t2;
+
source suite/versioning/common_finish.inc;
diff --git a/mysql-test/suite/wsrep/disabled.def b/mysql-test/suite/wsrep/disabled.def
index 11577bfe8b0..991109d72b8 100644
--- a/mysql-test/suite/wsrep/disabled.def
+++ b/mysql-test/suite/wsrep/disabled.def
@@ -10,3 +10,8 @@
#
##############################################################################
+
+mdev_6832: wsrep_provider is read-only for security reasons
+MDEV-23092: wsrep_provider is read-only for security reasons
+wsrep_variables_no_provider: wsrep_provider is read-only for security reasons
+MDEV-22443: it is no longer allowed to enable wsrep_on if wsrep_provider is 'none'
diff --git a/mysql-test/suite/wsrep/r/variables.result b/mysql-test/suite/wsrep/r/variables.result
index 1c427b34d2b..97fabd58de6 100644
--- a/mysql-test/suite/wsrep/r/variables.result
+++ b/mysql-test/suite/wsrep/r/variables.result
@@ -1,98 +1,156 @@
-call mtr.add_suppression("WSREP: Initial position was provided by configuration or SST, avoiding override");
-SET @wsrep_provider_options_saved= @@global.wsrep_provider_options;
-SET @wsrep_cluster_address_saved= @@global.wsrep_cluster_address;
-
-# MDEV#5534: mysql_tzinfo_to_sql generates wrong query
-#
-# Testing wsrep_replicate_myisam variable.
-SELECT @@session.wsrep_replicate_myisam;
-ERROR HY000: Variable 'wsrep_replicate_myisam' is a GLOBAL variable
-SELECT @@global.wsrep_replicate_myisam;
-@@global.wsrep_replicate_myisam
-0
-SET SESSION wsrep_replicate_myisam= ON;
-ERROR HY000: Variable 'wsrep_replicate_myisam' is a GLOBAL variable and should be set with SET GLOBAL
-SET GLOBAL wsrep_replicate_myisam= ON;
-SET GLOBAL wsrep_replicate_myisam= OFF;
-SET GLOBAL wsrep_provider=none;
+# Correct Galera library found
#
# MDEV#5790: SHOW GLOBAL STATUS LIKE does not show the correct list of
# variables when using "_"
#
CALL mtr.add_suppression("WSREP: Could not open saved state file for reading.*");
-SHOW GLOBAL STATUS LIKE 'wsrep_local_state_comment';
+SHOW GLOBAL STATUS LIKE 'wsrep%';
Variable_name Value
+wsrep_local_state_uuid #
+wsrep_protocol_version #
+wsrep_last_committed #
+wsrep_replicated #
+wsrep_replicated_bytes #
+wsrep_repl_keys #
+wsrep_repl_keys_bytes #
+wsrep_repl_data_bytes #
+wsrep_repl_other_bytes #
+wsrep_received #
+wsrep_received_bytes #
+wsrep_local_commits #
+wsrep_local_cert_failures #
+wsrep_local_replays #
+wsrep_local_send_queue #
+wsrep_local_send_queue_max #
+wsrep_local_send_queue_min #
+wsrep_local_send_queue_avg #
+wsrep_local_recv_queue #
+wsrep_local_recv_queue_max #
+wsrep_local_recv_queue_min #
+wsrep_local_recv_queue_avg #
+wsrep_local_cached_downto #
+wsrep_flow_control_paused_ns #
+wsrep_flow_control_paused #
+wsrep_flow_control_sent #
+wsrep_flow_control_recv #
+wsrep_flow_control_active #
+wsrep_flow_control_requested #
+wsrep_cert_deps_distance #
+wsrep_apply_oooe #
+wsrep_apply_oool #
+wsrep_apply_window #
+wsrep_commit_oooe #
+wsrep_commit_oool #
+wsrep_commit_window #
+wsrep_local_state #
wsrep_local_state_comment #
-# Should show nothing.
-SHOW STATUS LIKE 'x';
-Variable_name Value
-SET GLOBAL wsrep_provider=none;
+wsrep_cert_index_size #
+wsrep_causal_reads #
+wsrep_cert_interval #
+wsrep_open_transactions #
+wsrep_open_connections #
+wsrep_incoming_addresses #
+wsrep_cluster_weight #
+wsrep_desync_count #
+wsrep_evs_delayed #
+wsrep_evs_evict_list #
+wsrep_evs_repl_latency #
+wsrep_evs_state #
+wsrep_gcomm_uuid #
+wsrep_gmcast_segment #
+wsrep_applier_thread_count #
+wsrep_cluster_capabilities #
+wsrep_cluster_conf_id #
+wsrep_cluster_size #
+wsrep_cluster_state_uuid #
+wsrep_cluster_status #
+wsrep_connected #
+wsrep_local_bf_aborts #
+wsrep_local_index #
+wsrep_provider_capabilities #
+wsrep_provider_name #
+wsrep_provider_vendor #
+wsrep_provider_version #
+wsrep_ready #
+wsrep_rollbacker_thread_count #
+wsrep_thread_count #
-SHOW STATUS LIKE 'wsrep_local_state_uuid';
+SHOW GLOBAL STATUS LIKE 'wsrep_%';
Variable_name Value
wsrep_local_state_uuid #
-
-SHOW STATUS LIKE 'wsrep_last_committed';
-Variable_name Value
+wsrep_protocol_version #
wsrep_last_committed #
-SET GLOBAL wsrep_provider=none;
-
-#
-# MDEV#6206: wsrep_slave_threads subtracts from max_connections
-#
-call mtr.add_suppression("WSREP: Failed to get provider options");
-SELECT @@global.wsrep_provider;
-@@global.wsrep_provider
-libgalera_smm.so
-SELECT @@global.wsrep_slave_threads;
-@@global.wsrep_slave_threads
-1
-SELECT @@global.wsrep_cluster_address;
-@@global.wsrep_cluster_address
-
-SELECT @@global.wsrep_on;
-@@global.wsrep_on
-1
-SHOW STATUS LIKE 'threads_connected';
-Variable_name Value
-Threads_connected 1
-SHOW STATUS LIKE 'wsrep_thread_count';
-Variable_name Value
-wsrep_thread_count 0
-
-SELECT @@global.wsrep_provider;
-@@global.wsrep_provider
-libgalera_smm.so
-SELECT @@global.wsrep_cluster_address;
-@@global.wsrep_cluster_address
-
-SELECT @@global.wsrep_on;
-@@global.wsrep_on
-1
-SHOW STATUS LIKE 'threads_connected';
+wsrep_replicated #
+wsrep_replicated_bytes #
+wsrep_repl_keys #
+wsrep_repl_keys_bytes #
+wsrep_repl_data_bytes #
+wsrep_repl_other_bytes #
+wsrep_received #
+wsrep_received_bytes #
+wsrep_local_commits #
+wsrep_local_cert_failures #
+wsrep_local_replays #
+wsrep_local_send_queue #
+wsrep_local_send_queue_max #
+wsrep_local_send_queue_min #
+wsrep_local_send_queue_avg #
+wsrep_local_recv_queue #
+wsrep_local_recv_queue_max #
+wsrep_local_recv_queue_min #
+wsrep_local_recv_queue_avg #
+wsrep_local_cached_downto #
+wsrep_flow_control_paused_ns #
+wsrep_flow_control_paused #
+wsrep_flow_control_sent #
+wsrep_flow_control_recv #
+wsrep_flow_control_active #
+wsrep_flow_control_requested #
+wsrep_cert_deps_distance #
+wsrep_apply_oooe #
+wsrep_apply_oool #
+wsrep_apply_window #
+wsrep_commit_oooe #
+wsrep_commit_oool #
+wsrep_commit_window #
+wsrep_local_state #
+wsrep_local_state_comment #
+wsrep_cert_index_size #
+wsrep_causal_reads #
+wsrep_cert_interval #
+wsrep_open_transactions #
+wsrep_open_connections #
+wsrep_incoming_addresses #
+wsrep_cluster_weight #
+wsrep_desync_count #
+wsrep_evs_delayed #
+wsrep_evs_evict_list #
+wsrep_evs_repl_latency #
+wsrep_evs_state #
+wsrep_gcomm_uuid #
+wsrep_gmcast_segment #
+wsrep_applier_thread_count #
+wsrep_cluster_capabilities #
+wsrep_cluster_conf_id #
+wsrep_cluster_size #
+wsrep_cluster_state_uuid #
+wsrep_cluster_status #
+wsrep_connected #
+wsrep_local_bf_aborts #
+wsrep_local_index #
+wsrep_provider_capabilities #
+wsrep_provider_name #
+wsrep_provider_vendor #
+wsrep_provider_version #
+wsrep_ready #
+wsrep_rollbacker_thread_count #
+wsrep_thread_count #
+SHOW GLOBAL STATUS LIKE 'wsrep_local_state_comment';
Variable_name Value
-Threads_connected 1
-SHOW STATUS LIKE 'wsrep_thread_count';
+wsrep_local_state_comment #
+# Should show nothing.
+SHOW STATUS LIKE 'x';
Variable_name Value
-wsrep_thread_count 0
-
-# Setting wsrep_cluster_address triggers the creation of
-# applier/rollbacker threads.
-SET GLOBAL wsrep_cluster_address= 'gcomm://';
-# Wait for applier thread to get created 1.
-# Wait for applier thread to get created 2.
-SELECT VARIABLE_VALUE AS EXPECT_1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_applier_thread_count';
-EXPECT_1
-1
-SELECT VARIABLE_VALUE AS EXPECT_1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_rollbacker_thread_count';
-EXPECT_1
-1
-SELECT VARIABLE_VALUE AS EXPECT_2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_thread_count';
-EXPECT_2
-2
-SELECT @@global.wsrep_provider;
-@@global.wsrep_provider
-libgalera_smm.so
SELECT @@global.wsrep_cluster_address;
@@global.wsrep_cluster_address
gcomm://
@@ -105,47 +163,112 @@ Threads_connected 1
SHOW STATUS LIKE 'wsrep_thread_count';
Variable_name Value
wsrep_thread_count 2
-
-SET @wsrep_slave_threads_saved= @@global.wsrep_slave_threads;
-SET GLOBAL wsrep_slave_threads= 10;
-# Wait for 9 applier threads to get created.
-SELECT VARIABLE_VALUE AS EXPECT_10 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_applier_thread_count';
-EXPECT_10
-10
-SELECT VARIABLE_VALUE AS EXPECT_1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_rollbacker_thread_count';
-EXPECT_1
-1
-SELECT VARIABLE_VALUE AS EXPECT_11 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_thread_count';
-EXPECT_11
-11
-SHOW STATUS LIKE 'threads_connected';
-Variable_name Value
-Threads_connected 1
-set wsrep_on=0;
-set wsrep_on=1;
-create user test@localhost;
-connect con1,localhost,test;
-set auto_increment_increment=10;
-set wsrep_on=0;
-ERROR 42000: Access denied; you need (at least one of) the SUPER privilege(s) for this operation
-disconnect con1;
-connection default;
-drop user test@localhost;
-#
-# MDEV#6411: Setting set @@global.wsrep_sst_auth=NULL causes crash
-#
-SET @wsrep_sst_auth_saved= @@global.wsrep_sst_auth;
-SET @@global.wsrep_sst_auth= 'user:pass';
-SELECT @@global.wsrep_sst_auth;
-@@global.wsrep_sst_auth
-********
-SET @@global.wsrep_sst_auth= '';
-SELECT @@global.wsrep_sst_auth;
-@@global.wsrep_sst_auth
-
-SET @@global.wsrep_sst_auth= NULL;
-SELECT @@global.wsrep_sst_auth;
-@@global.wsrep_sst_auth
-NULL
-SET @@global.wsrep_sst_auth= @wsrep_sst_auth_saved;
-# End of test.
+# variables
+SELECT COUNT(*) AS EXPECT_49 FROM INFORMATION_SCHEMA.SESSION_VARIABLES WHERE VARIABLE_NAME LIKE "wsrep%";
+EXPECT_49
+49
+SELECT COUNT(*) AS EXPECT_49 FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME LIKE "wsrep%";
+EXPECT_49
+49
+SELECT VARIABLE_NAME FROM INFORMATION_SCHEMA.SESSION_VARIABLES WHERE VARIABLE_NAME LIKE "wsrep%" ORDER BY VARIABLE_NAME;
+VARIABLE_NAME
+WSREP_AUTO_INCREMENT_CONTROL
+WSREP_CAUSAL_READS
+WSREP_CERTIFICATION_RULES
+WSREP_CERTIFY_NONPK
+WSREP_CLUSTER_ADDRESS
+WSREP_CLUSTER_NAME
+WSREP_CONVERT_LOCK_TO_TRX
+WSREP_DATA_HOME_DIR
+WSREP_DBUG_OPTION
+WSREP_DEBUG
+WSREP_DESYNC
+WSREP_DIRTY_READS
+WSREP_DRUPAL_282555_WORKAROUND
+WSREP_FORCED_BINLOG_FORMAT
+WSREP_GTID_DOMAIN_ID
+WSREP_GTID_MODE
+WSREP_IGNORE_APPLY_ERRORS
+WSREP_LOAD_DATA_SPLITTING
+WSREP_LOG_CONFLICTS
+WSREP_MAX_WS_ROWS
+WSREP_MAX_WS_SIZE
+WSREP_MYSQL_REPLICATION_BUNDLE
+WSREP_NODE_ADDRESS
+WSREP_NODE_INCOMING_ADDRESS
+WSREP_NODE_NAME
+WSREP_NOTIFY_CMD
+WSREP_ON
+WSREP_OSU_METHOD
+WSREP_PATCH_VERSION
+WSREP_PROVIDER
+WSREP_PROVIDER_OPTIONS
+WSREP_RECOVER
+WSREP_REJECT_QUERIES
+WSREP_REPLICATE_MYISAM
+WSREP_RESTART_SLAVE
+WSREP_RETRY_AUTOCOMMIT
+WSREP_SLAVE_FK_CHECKS
+WSREP_SLAVE_THREADS
+WSREP_SLAVE_UK_CHECKS
+WSREP_SR_STORE
+WSREP_SST_AUTH
+WSREP_SST_DONOR
+WSREP_SST_DONOR_REJECTS_QUERIES
+WSREP_SST_METHOD
+WSREP_SST_RECEIVE_ADDRESS
+WSREP_START_POSITION
+WSREP_SYNC_WAIT
+WSREP_TRX_FRAGMENT_SIZE
+WSREP_TRX_FRAGMENT_UNIT
+SELECT VARIABLE_NAME FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME LIKE "wsrep%" ORDER BY VARIABLE_NAME;
+VARIABLE_NAME
+WSREP_AUTO_INCREMENT_CONTROL
+WSREP_CAUSAL_READS
+WSREP_CERTIFICATION_RULES
+WSREP_CERTIFY_NONPK
+WSREP_CLUSTER_ADDRESS
+WSREP_CLUSTER_NAME
+WSREP_CONVERT_LOCK_TO_TRX
+WSREP_DATA_HOME_DIR
+WSREP_DBUG_OPTION
+WSREP_DEBUG
+WSREP_DESYNC
+WSREP_DIRTY_READS
+WSREP_DRUPAL_282555_WORKAROUND
+WSREP_FORCED_BINLOG_FORMAT
+WSREP_GTID_DOMAIN_ID
+WSREP_GTID_MODE
+WSREP_IGNORE_APPLY_ERRORS
+WSREP_LOAD_DATA_SPLITTING
+WSREP_LOG_CONFLICTS
+WSREP_MAX_WS_ROWS
+WSREP_MAX_WS_SIZE
+WSREP_MYSQL_REPLICATION_BUNDLE
+WSREP_NODE_ADDRESS
+WSREP_NODE_INCOMING_ADDRESS
+WSREP_NODE_NAME
+WSREP_NOTIFY_CMD
+WSREP_ON
+WSREP_OSU_METHOD
+WSREP_PATCH_VERSION
+WSREP_PROVIDER
+WSREP_PROVIDER_OPTIONS
+WSREP_RECOVER
+WSREP_REJECT_QUERIES
+WSREP_REPLICATE_MYISAM
+WSREP_RESTART_SLAVE
+WSREP_RETRY_AUTOCOMMIT
+WSREP_SLAVE_FK_CHECKS
+WSREP_SLAVE_THREADS
+WSREP_SLAVE_UK_CHECKS
+WSREP_SR_STORE
+WSREP_SST_AUTH
+WSREP_SST_DONOR
+WSREP_SST_DONOR_REJECTS_QUERIES
+WSREP_SST_METHOD
+WSREP_SST_RECEIVE_ADDRESS
+WSREP_START_POSITION
+WSREP_SYNC_WAIT
+WSREP_TRX_FRAGMENT_SIZE
+WSREP_TRX_FRAGMENT_UNIT
diff --git a/mysql-test/suite/wsrep/r/variables_debug.result b/mysql-test/suite/wsrep/r/variables_debug.result
index 886325acf53..9a140f26ec3 100644
--- a/mysql-test/suite/wsrep/r/variables_debug.result
+++ b/mysql-test/suite/wsrep/r/variables_debug.result
@@ -1,20 +1,4 @@
-call mtr.add_suppression("WSREP: Initial position was provided by configuration or SST, avoiding override");
-SET @wsrep_provider_options_saved= @@global.wsrep_provider_options;
-SET @wsrep_cluster_address_saved= @@global.wsrep_cluster_address;
-
-# MDEV#5534: mysql_tzinfo_to_sql generates wrong query
-#
-# Testing wsrep_replicate_myisam variable.
-SELECT @@session.wsrep_replicate_myisam;
-ERROR HY000: Variable 'wsrep_replicate_myisam' is a GLOBAL variable
-SELECT @@global.wsrep_replicate_myisam;
-@@global.wsrep_replicate_myisam
-0
-SET SESSION wsrep_replicate_myisam= ON;
-ERROR HY000: Variable 'wsrep_replicate_myisam' is a GLOBAL variable and should be set with SET GLOBAL
-SET GLOBAL wsrep_replicate_myisam= ON;
-SET GLOBAL wsrep_replicate_myisam= OFF;
-SET GLOBAL wsrep_provider=none;
+# Correct Galera library found
#
# MDEV#5790: SHOW GLOBAL STATUS LIKE does not show the correct list of
# variables when using "_"
@@ -66,7 +50,15 @@ wsrep_cert_interval #
wsrep_open_transactions #
wsrep_open_connections #
wsrep_incoming_addresses #
+wsrep_cluster_weight #
wsrep_debug_sync_waiters #
+wsrep_desync_count #
+wsrep_evs_delayed #
+wsrep_evs_evict_list #
+wsrep_evs_repl_latency #
+wsrep_evs_state #
+wsrep_gcomm_uuid #
+wsrep_gmcast_segment #
wsrep_applier_thread_count #
wsrep_cluster_capabilities #
wsrep_cluster_conf_id #
@@ -130,7 +122,15 @@ wsrep_cert_interval #
wsrep_open_transactions #
wsrep_open_connections #
wsrep_incoming_addresses #
+wsrep_cluster_weight #
wsrep_debug_sync_waiters #
+wsrep_desync_count #
+wsrep_evs_delayed #
+wsrep_evs_evict_list #
+wsrep_evs_repl_latency #
+wsrep_evs_state #
+wsrep_gcomm_uuid #
+wsrep_gmcast_segment #
wsrep_applier_thread_count #
wsrep_cluster_capabilities #
wsrep_cluster_conf_id #
@@ -153,73 +153,6 @@ wsrep_local_state_comment #
# Should show nothing.
SHOW STATUS LIKE 'x';
Variable_name Value
-SET GLOBAL wsrep_provider=none;
-
-SHOW STATUS LIKE 'wsrep_local_state_uuid';
-Variable_name Value
-wsrep_local_state_uuid #
-
-SHOW STATUS LIKE 'wsrep_last_committed';
-Variable_name Value
-wsrep_last_committed #
-SET GLOBAL wsrep_provider=none;
-
-#
-# MDEV#6206: wsrep_slave_threads subtracts from max_connections
-#
-call mtr.add_suppression("WSREP: Failed to get provider options");
-SELECT @@global.wsrep_provider;
-@@global.wsrep_provider
-libgalera_smm.so
-SELECT @@global.wsrep_slave_threads;
-@@global.wsrep_slave_threads
-1
-SELECT @@global.wsrep_cluster_address;
-@@global.wsrep_cluster_address
-
-SELECT @@global.wsrep_on;
-@@global.wsrep_on
-1
-SHOW STATUS LIKE 'threads_connected';
-Variable_name Value
-Threads_connected 1
-SHOW STATUS LIKE 'wsrep_thread_count';
-Variable_name Value
-wsrep_thread_count 0
-
-SELECT @@global.wsrep_provider;
-@@global.wsrep_provider
-libgalera_smm.so
-SELECT @@global.wsrep_cluster_address;
-@@global.wsrep_cluster_address
-
-SELECT @@global.wsrep_on;
-@@global.wsrep_on
-1
-SHOW STATUS LIKE 'threads_connected';
-Variable_name Value
-Threads_connected 1
-SHOW STATUS LIKE 'wsrep_thread_count';
-Variable_name Value
-wsrep_thread_count 0
-
-# Setting wsrep_cluster_address triggers the creation of
-# applier/rollbacker threads.
-SET GLOBAL wsrep_cluster_address= 'gcomm://';
-# Wait for applier thread to get created 1.
-# Wait for applier thread to get created 2.
-SELECT VARIABLE_VALUE AS EXPECT_1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_applier_thread_count';
-EXPECT_1
-1
-SELECT VARIABLE_VALUE AS EXPECT_1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_rollbacker_thread_count';
-EXPECT_1
-1
-SELECT VARIABLE_VALUE AS EXPECT_2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_thread_count';
-EXPECT_2
-2
-SELECT @@global.wsrep_provider;
-@@global.wsrep_provider
-libgalera_smm.so
SELECT @@global.wsrep_cluster_address;
@@global.wsrep_cluster_address
gcomm://
@@ -232,47 +165,112 @@ Threads_connected 1
SHOW STATUS LIKE 'wsrep_thread_count';
Variable_name Value
wsrep_thread_count 2
-
-SET @wsrep_slave_threads_saved= @@global.wsrep_slave_threads;
-SET GLOBAL wsrep_slave_threads= 10;
-# Wait for 9 applier threads to get created.
-SELECT VARIABLE_VALUE AS EXPECT_10 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_applier_thread_count';
-EXPECT_10
-10
-SELECT VARIABLE_VALUE AS EXPECT_1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_rollbacker_thread_count';
-EXPECT_1
-1
-SELECT VARIABLE_VALUE AS EXPECT_11 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_thread_count';
-EXPECT_11
-11
-SHOW STATUS LIKE 'threads_connected';
-Variable_name Value
-Threads_connected 1
-set wsrep_on=0;
-set wsrep_on=1;
-create user test@localhost;
-connect con1,localhost,test;
-set auto_increment_increment=10;
-set wsrep_on=0;
-ERROR 42000: Access denied; you need (at least one of) the SUPER privilege(s) for this operation
-disconnect con1;
-connection default;
-drop user test@localhost;
-#
-# MDEV#6411: Setting set @@global.wsrep_sst_auth=NULL causes crash
-#
-SET @wsrep_sst_auth_saved= @@global.wsrep_sst_auth;
-SET @@global.wsrep_sst_auth= 'user:pass';
-SELECT @@global.wsrep_sst_auth;
-@@global.wsrep_sst_auth
-********
-SET @@global.wsrep_sst_auth= '';
-SELECT @@global.wsrep_sst_auth;
-@@global.wsrep_sst_auth
-
-SET @@global.wsrep_sst_auth= NULL;
-SELECT @@global.wsrep_sst_auth;
-@@global.wsrep_sst_auth
-NULL
-SET @@global.wsrep_sst_auth= @wsrep_sst_auth_saved;
-# End of test.
+# variables
+SELECT COUNT(*) AS EXPECT_49 FROM INFORMATION_SCHEMA.SESSION_VARIABLES WHERE VARIABLE_NAME LIKE "wsrep%";
+EXPECT_49
+49
+SELECT COUNT(*) AS EXPECT_49 FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME LIKE "wsrep%";
+EXPECT_49
+49
+SELECT VARIABLE_NAME FROM INFORMATION_SCHEMA.SESSION_VARIABLES WHERE VARIABLE_NAME LIKE "wsrep%" ORDER BY VARIABLE_NAME;
+VARIABLE_NAME
+WSREP_AUTO_INCREMENT_CONTROL
+WSREP_CAUSAL_READS
+WSREP_CERTIFICATION_RULES
+WSREP_CERTIFY_NONPK
+WSREP_CLUSTER_ADDRESS
+WSREP_CLUSTER_NAME
+WSREP_CONVERT_LOCK_TO_TRX
+WSREP_DATA_HOME_DIR
+WSREP_DBUG_OPTION
+WSREP_DEBUG
+WSREP_DESYNC
+WSREP_DIRTY_READS
+WSREP_DRUPAL_282555_WORKAROUND
+WSREP_FORCED_BINLOG_FORMAT
+WSREP_GTID_DOMAIN_ID
+WSREP_GTID_MODE
+WSREP_IGNORE_APPLY_ERRORS
+WSREP_LOAD_DATA_SPLITTING
+WSREP_LOG_CONFLICTS
+WSREP_MAX_WS_ROWS
+WSREP_MAX_WS_SIZE
+WSREP_MYSQL_REPLICATION_BUNDLE
+WSREP_NODE_ADDRESS
+WSREP_NODE_INCOMING_ADDRESS
+WSREP_NODE_NAME
+WSREP_NOTIFY_CMD
+WSREP_ON
+WSREP_OSU_METHOD
+WSREP_PATCH_VERSION
+WSREP_PROVIDER
+WSREP_PROVIDER_OPTIONS
+WSREP_RECOVER
+WSREP_REJECT_QUERIES
+WSREP_REPLICATE_MYISAM
+WSREP_RESTART_SLAVE
+WSREP_RETRY_AUTOCOMMIT
+WSREP_SLAVE_FK_CHECKS
+WSREP_SLAVE_THREADS
+WSREP_SLAVE_UK_CHECKS
+WSREP_SR_STORE
+WSREP_SST_AUTH
+WSREP_SST_DONOR
+WSREP_SST_DONOR_REJECTS_QUERIES
+WSREP_SST_METHOD
+WSREP_SST_RECEIVE_ADDRESS
+WSREP_START_POSITION
+WSREP_SYNC_WAIT
+WSREP_TRX_FRAGMENT_SIZE
+WSREP_TRX_FRAGMENT_UNIT
+SELECT VARIABLE_NAME FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME LIKE "wsrep%" ORDER BY VARIABLE_NAME;
+VARIABLE_NAME
+WSREP_AUTO_INCREMENT_CONTROL
+WSREP_CAUSAL_READS
+WSREP_CERTIFICATION_RULES
+WSREP_CERTIFY_NONPK
+WSREP_CLUSTER_ADDRESS
+WSREP_CLUSTER_NAME
+WSREP_CONVERT_LOCK_TO_TRX
+WSREP_DATA_HOME_DIR
+WSREP_DBUG_OPTION
+WSREP_DEBUG
+WSREP_DESYNC
+WSREP_DIRTY_READS
+WSREP_DRUPAL_282555_WORKAROUND
+WSREP_FORCED_BINLOG_FORMAT
+WSREP_GTID_DOMAIN_ID
+WSREP_GTID_MODE
+WSREP_IGNORE_APPLY_ERRORS
+WSREP_LOAD_DATA_SPLITTING
+WSREP_LOG_CONFLICTS
+WSREP_MAX_WS_ROWS
+WSREP_MAX_WS_SIZE
+WSREP_MYSQL_REPLICATION_BUNDLE
+WSREP_NODE_ADDRESS
+WSREP_NODE_INCOMING_ADDRESS
+WSREP_NODE_NAME
+WSREP_NOTIFY_CMD
+WSREP_ON
+WSREP_OSU_METHOD
+WSREP_PATCH_VERSION
+WSREP_PROVIDER
+WSREP_PROVIDER_OPTIONS
+WSREP_RECOVER
+WSREP_REJECT_QUERIES
+WSREP_REPLICATE_MYISAM
+WSREP_RESTART_SLAVE
+WSREP_RETRY_AUTOCOMMIT
+WSREP_SLAVE_FK_CHECKS
+WSREP_SLAVE_THREADS
+WSREP_SLAVE_UK_CHECKS
+WSREP_SR_STORE
+WSREP_SST_AUTH
+WSREP_SST_DONOR
+WSREP_SST_DONOR_REJECTS_QUERIES
+WSREP_SST_METHOD
+WSREP_SST_RECEIVE_ADDRESS
+WSREP_START_POSITION
+WSREP_SYNC_WAIT
+WSREP_TRX_FRAGMENT_SIZE
+WSREP_TRX_FRAGMENT_UNIT
diff --git a/mysql-test/suite/sys_vars/r/wsrep_on_basic.result b/mysql-test/suite/wsrep/r/wsrep_on_basic.result
index 735e2d77180..b3186fa674f 100644
--- a/mysql-test/suite/sys_vars/r/wsrep_on_basic.result
+++ b/mysql-test/suite/wsrep/r/wsrep_on_basic.result
@@ -7,10 +7,10 @@ SET @wsrep_on_session_saved = @@session.wsrep_on;
# default
SELECT @@global.wsrep_on;
@@global.wsrep_on
-0
+1
SELECT @@session.wsrep_on;
@@session.wsrep_on
-0
+1
# scope and valid values
SET @@global.wsrep_on=OFF;
diff --git a/mysql-test/suite/wsrep/r/wsrep_variables_no_provider.result b/mysql-test/suite/wsrep/r/wsrep_variables_no_provider.result
new file mode 100644
index 00000000000..ad35dc8dbcd
--- /dev/null
+++ b/mysql-test/suite/wsrep/r/wsrep_variables_no_provider.result
@@ -0,0 +1,44 @@
+SELECT @@wsrep_on;
+@@wsrep_on
+1
+SET @wsrep_slave_threads_global_saved = @@global.wsrep_slave_threads;
+SET @wsrep_debug_saved = @@global.wsrep_debug;
+SET @wsrep_provider_options_saved= @@global.wsrep_provider_options;
+SET @wsrep_cluster_address_saved= @@global.wsrep_cluster_address;
+SET GLOBAL wsrep_provider=none;
+SET SESSION wsrep_trx_fragment_size=DEFAULT;
+ERROR HY000: Incorrect arguments to SET
+SELECT @@session.wsrep_trx_fragment_size;
+@@session.wsrep_trx_fragment_size
+0
+SET GLOBAL wsrep_start_position='12345678-1234-1234-1234-123456789012:100';
+ERROR 42000: Variable 'wsrep_start_position' can't be set to the value of '12345678-1234-1234-1234-123456789012:100'
+SHOW WARNINGS;
+Level Code Message
+Warning 1231 Cannot set 'wsrep_start_position' because wsrep is switched off or provider is not loaded
+Error 1231 Variable 'wsrep_start_position' can't be set to the value of '12345678-1234-1234-1234-123456789012:100'
+SELECT @@global.wsrep_start_position;
+@@global.wsrep_start_position
+00000000-0000-0000-0000-000000000000:-1
+SET GLOBAL wsrep_debug=1;
+Warnings:
+Warning 1231 Setting 'wsrep_debug' has no effect because wsrep is switched off
+SELECT @@global.wsrep_debug;
+@@global.wsrep_debug
+NONE
+SET GLOBAL wsrep_slave_threads=5;
+SELECT @@global.wsrep_slave_threads;
+@@global.wsrep_slave_threads
+5
+SET GLOBAL wsrep_desync=1;
+ERROR HY000: WSREP (galera) not started
+SELECT @@global.wsrep_desync;
+@@global.wsrep_desync
+0
+SET SESSION wsrep_trx_fragment_unit='rows';
+ERROR HY000: Incorrect arguments to SET
+SELECT @@session.wsrep_trx_fragment_unit;
+@@session.wsrep_trx_fragment_unit
+rows
+SET @@global.wsrep_slave_threads = @wsrep_slave_threads_global_saved;
+SET @@global.wsrep_debug = @wsrep_debug_saved;
diff --git a/mysql-test/suite/wsrep/r/wsrep_variables_wsrep_off.result b/mysql-test/suite/wsrep/r/wsrep_variables_wsrep_off.result
new file mode 100644
index 00000000000..7cae89eae8e
--- /dev/null
+++ b/mysql-test/suite/wsrep/r/wsrep_variables_wsrep_off.result
@@ -0,0 +1,39 @@
+SELECT @@wsrep_on;
+@@wsrep_on
+0
+SET @wsrep_slave_threads_global_saved = @@global.wsrep_slave_threads;
+SET @wsrep_debug_saved = @@global.wsrep_debug;
+SET SESSION wsrep_trx_fragment_size=DEFAULT;
+ERROR HY000: Incorrect arguments to SET
+SELECT @@session.wsrep_trx_fragment_size;
+@@session.wsrep_trx_fragment_size
+0
+SET GLOBAL wsrep_start_position='12345678-1234-1234-1234-123456789012:100';
+ERROR 42000: Variable 'wsrep_start_position' can't be set to the value of '12345678-1234-1234-1234-123456789012:100'
+SHOW WARNINGS;
+Level Code Message
+Warning 1231 Cannot set 'wsrep_start_position' because wsrep is switched off or provider is not loaded
+Error 1231 Variable 'wsrep_start_position' can't be set to the value of '12345678-1234-1234-1234-123456789012:100'
+SELECT @@global.wsrep_start_position;
+@@global.wsrep_start_position
+00000000-0000-0000-0000-000000000000:-1
+SET GLOBAL wsrep_debug=1;
+Warnings:
+Warning 1231 Setting 'wsrep_debug' has no effect because wsrep is switched off
+SELECT @@global.wsrep_debug;
+@@global.wsrep_debug
+NONE
+SET GLOBAL wsrep_slave_threads=5;
+SELECT @@global.wsrep_slave_threads;
+@@global.wsrep_slave_threads
+5
+SET GLOBAL wsrep_desync=1;
+ERROR HY000: WSREP (galera) not started
+SELECT @@global.wsrep_desync;
+@@global.wsrep_desync
+0
+SET SESSION wsrep_trx_fragment_unit='rows';
+ERROR HY000: Incorrect arguments to SET
+SELECT @@session.wsrep_trx_fragment_unit;
+@@session.wsrep_trx_fragment_unit
+rows
diff --git a/mysql-test/suite/wsrep/t/variables.test b/mysql-test/suite/wsrep/t/variables.test
index cae2fe5d6db..8b94275a646 100644
--- a/mysql-test/suite/wsrep/t/variables.test
+++ b/mysql-test/suite/wsrep/t/variables.test
@@ -1,28 +1,10 @@
--source include/have_wsrep.inc
--source include/force_restart.inc
--source include/have_innodb.inc
+--source include/galera_no_debug_sync.inc
-call mtr.add_suppression("WSREP: Initial position was provided by configuration or SST, avoiding override");
-
-SET @wsrep_provider_options_saved= @@global.wsrep_provider_options;
-SET @wsrep_cluster_address_saved= @@global.wsrep_cluster_address;
-
---echo
---echo # MDEV#5534: mysql_tzinfo_to_sql generates wrong query
---echo #
---echo # Testing wsrep_replicate_myisam variable.
-
---error ER_INCORRECT_GLOBAL_LOCAL_VAR
-SELECT @@session.wsrep_replicate_myisam;
-SELECT @@global.wsrep_replicate_myisam;
-
---error ER_GLOBAL_VARIABLE
-SET SESSION wsrep_replicate_myisam= ON;
-SET GLOBAL wsrep_replicate_myisam= ON;
-
-# Reset it back.
-SET GLOBAL wsrep_replicate_myisam= OFF;
-SET GLOBAL wsrep_provider=none;
+--let $galera_version=26.4.8
+source ../../wsrep/include/check_galera_version.inc;
--echo #
--echo # MDEV#5790: SHOW GLOBAL STATUS LIKE does not show the correct list of
@@ -31,138 +13,27 @@ SET GLOBAL wsrep_provider=none;
CALL mtr.add_suppression("WSREP: Could not open saved state file for reading.*");
---disable_query_log
-eval SET GLOBAL wsrep_provider= '$WSREP_PROVIDER';
---enable_query_log
-
--replace_column 2 #
-SHOW GLOBAL STATUS LIKE 'wsrep_local_state_comment';
-
---echo # Should show nothing.
-SHOW STATUS LIKE 'x';
-
-# Reset it back.
-SET GLOBAL wsrep_provider=none;
+SHOW GLOBAL STATUS LIKE 'wsrep%';
---disable_query_log
-eval SET GLOBAL wsrep_provider= '$WSREP_PROVIDER';
---enable_query_log
-
-# The following 2 variables are used by mariabackup
-# SST.
---echo
---replace_column 2 #
-SHOW STATUS LIKE 'wsrep_local_state_uuid';
--echo
--replace_column 2 #
-SHOW STATUS LIKE 'wsrep_last_committed';
-
-# Reset it back.
-SET GLOBAL wsrep_provider=none;
-
---echo
---echo #
---echo # MDEV#6206: wsrep_slave_threads subtracts from max_connections
---echo #
-call mtr.add_suppression("WSREP: Failed to get provider options");
-
---disable_query_log
-eval SET GLOBAL wsrep_provider= '$WSREP_PROVIDER';
---enable_query_log
-
---replace_regex /.*libgalera.*smm.*/libgalera_smm.so/
-SELECT @@global.wsrep_provider;
-SELECT @@global.wsrep_slave_threads;
-SELECT @@global.wsrep_cluster_address;
-SELECT @@global.wsrep_on;
-SHOW STATUS LIKE 'threads_connected';
-SHOW STATUS LIKE 'wsrep_thread_count';
---echo
+SHOW GLOBAL STATUS LIKE 'wsrep_%';
---disable_query_log
-eval SET GLOBAL wsrep_provider= '$WSREP_PROVIDER';
---enable_query_log
-
---replace_regex /.*libgalera.*smm.*/libgalera_smm.so/
-SELECT @@global.wsrep_provider;
-SELECT @@global.wsrep_cluster_address;
-SELECT @@global.wsrep_on;
-SHOW STATUS LIKE 'threads_connected';
-SHOW STATUS LIKE 'wsrep_thread_count';
---echo
-
---echo # Setting wsrep_cluster_address triggers the creation of
---echo # applier/rollbacker threads.
-SET GLOBAL wsrep_cluster_address= 'gcomm://';
-
---echo # Wait for applier thread to get created 1.
---let $wait_condition = SELECT VARIABLE_VALUE = 1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_applier_thread_count';
---source include/wait_condition.inc
---echo # Wait for applier thread to get created 2.
---let $wait_condition = SELECT VARIABLE_VALUE = 1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_rollbacker_thread_count';
---source include/wait_condition.inc
+--replace_column 2 #
+SHOW GLOBAL STATUS LIKE 'wsrep_local_state_comment';
-SELECT VARIABLE_VALUE AS EXPECT_1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_applier_thread_count';
-SELECT VARIABLE_VALUE AS EXPECT_1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_rollbacker_thread_count';
-SELECT VARIABLE_VALUE AS EXPECT_2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_thread_count';
+--echo # Should show nothing.
+SHOW STATUS LIKE 'x';
---replace_regex /.*libgalera.*smm.*/libgalera_smm.so/
-SELECT @@global.wsrep_provider;
SELECT @@global.wsrep_cluster_address;
SELECT @@global.wsrep_on;
SHOW STATUS LIKE 'threads_connected';
SHOW STATUS LIKE 'wsrep_thread_count';
---echo
-
-SET @wsrep_slave_threads_saved= @@global.wsrep_slave_threads;
-SET GLOBAL wsrep_slave_threads= 10;
-
---echo # Wait for 9 applier threads to get created.
---let $wait_condition = SELECT VARIABLE_VALUE = 10 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_applier_thread_count';
---source include/wait_condition.inc
-
-SELECT VARIABLE_VALUE AS EXPECT_10 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_applier_thread_count';
-SELECT VARIABLE_VALUE AS EXPECT_1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_rollbacker_thread_count';
-SELECT VARIABLE_VALUE AS EXPECT_11 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_thread_count';
-
-SHOW STATUS LIKE 'threads_connected';
-
-#
-# privileges for wsrep_on
-#
-set wsrep_on=0;
-set wsrep_on=1;
---source include/wait_until_connected_again.inc
-create user test@localhost;
-connect con1,localhost,test;
-set auto_increment_increment=10;
---error ER_SPECIFIC_ACCESS_DENIED_ERROR
-set wsrep_on=0;
-disconnect con1;
-connection default;
-drop user test@localhost;
-
---echo #
---echo # MDEV#6411: Setting set @@global.wsrep_sst_auth=NULL causes crash
---echo #
-SET @wsrep_sst_auth_saved= @@global.wsrep_sst_auth;
-SET @@global.wsrep_sst_auth= 'user:pass';
-SELECT @@global.wsrep_sst_auth;
-SET @@global.wsrep_sst_auth= '';
-SELECT @@global.wsrep_sst_auth;
-SET @@global.wsrep_sst_auth= NULL;
-SELECT @@global.wsrep_sst_auth;
-SET @@global.wsrep_sst_auth= @wsrep_sst_auth_saved;
-
-# Reset (for mtr internal checks)
-
---disable_query_log
-SET GLOBAL wsrep_slave_threads= @wsrep_slave_threads_saved;
-eval SET GLOBAL wsrep_provider= '$WSREP_PROVIDER';
-SET GLOBAL wsrep_cluster_address= @wsrep_cluster_address_saved;
-SET GLOBAL wsrep_provider_options= @wsrep_provider_options_saved;
---enable_query_log
---source include/galera_wait_ready.inc
+--echo # variables
---echo # End of test.
+SELECT COUNT(*) AS EXPECT_49 FROM INFORMATION_SCHEMA.SESSION_VARIABLES WHERE VARIABLE_NAME LIKE "wsrep%";
+SELECT COUNT(*) AS EXPECT_49 FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME LIKE "wsrep%";
+SELECT VARIABLE_NAME FROM INFORMATION_SCHEMA.SESSION_VARIABLES WHERE VARIABLE_NAME LIKE "wsrep%" ORDER BY VARIABLE_NAME;
+SELECT VARIABLE_NAME FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME LIKE "wsrep%" ORDER BY VARIABLE_NAME;
diff --git a/mysql-test/suite/wsrep/t/variables_debug.test b/mysql-test/suite/wsrep/t/variables_debug.test
index f2c3a0a3b78..f1b0194dee0 100644
--- a/mysql-test/suite/wsrep/t/variables_debug.test
+++ b/mysql-test/suite/wsrep/t/variables_debug.test
@@ -1,29 +1,12 @@
--source include/have_wsrep.inc
--source include/force_restart.inc
--source include/have_innodb.inc
+--source include/have_debug.inc
+--source include/have_debug_sync.inc
--source include/galera_have_debug_sync.inc
-call mtr.add_suppression("WSREP: Initial position was provided by configuration or SST, avoiding override");
-
-SET @wsrep_provider_options_saved= @@global.wsrep_provider_options;
-SET @wsrep_cluster_address_saved= @@global.wsrep_cluster_address;
-
---echo
---echo # MDEV#5534: mysql_tzinfo_to_sql generates wrong query
---echo #
---echo # Testing wsrep_replicate_myisam variable.
-
---error ER_INCORRECT_GLOBAL_LOCAL_VAR
-SELECT @@session.wsrep_replicate_myisam;
-SELECT @@global.wsrep_replicate_myisam;
-
---error ER_GLOBAL_VARIABLE
-SET SESSION wsrep_replicate_myisam= ON;
-SET GLOBAL wsrep_replicate_myisam= ON;
-
-# Reset it back.
-SET GLOBAL wsrep_replicate_myisam= OFF;
-SET GLOBAL wsrep_provider=none;
+--let $galera_version=26.4.8
+source ../../wsrep/include/check_galera_version.inc;
--echo #
--echo # MDEV#5790: SHOW GLOBAL STATUS LIKE does not show the correct list of
@@ -32,10 +15,6 @@ SET GLOBAL wsrep_provider=none;
CALL mtr.add_suppression("WSREP: Could not open saved state file for reading.*");
---disable_query_log
-eval SET GLOBAL wsrep_provider= '$WSREP_PROVIDER';
---enable_query_log
-
--replace_column 2 #
SHOW GLOBAL STATUS LIKE 'wsrep%';
@@ -49,128 +28,14 @@ SHOW GLOBAL STATUS LIKE 'wsrep_local_state_comment';
--echo # Should show nothing.
SHOW STATUS LIKE 'x';
-# Reset it back.
-SET GLOBAL wsrep_provider=none;
-
---disable_query_log
-eval SET GLOBAL wsrep_provider= '$WSREP_PROVIDER';
---enable_query_log
-
-# The following 2 variables are used by mariabackup
-# SST.
---echo
---replace_column 2 #
-SHOW STATUS LIKE 'wsrep_local_state_uuid';
---echo
---replace_column 2 #
-SHOW STATUS LIKE 'wsrep_last_committed';
-
-# Reset it back.
-SET GLOBAL wsrep_provider=none;
-
---echo
---echo #
---echo # MDEV#6206: wsrep_slave_threads subtracts from max_connections
---echo #
-call mtr.add_suppression("WSREP: Failed to get provider options");
-
---disable_query_log
-eval SET GLOBAL wsrep_provider= '$WSREP_PROVIDER';
---enable_query_log
-
---replace_regex /.*libgalera_smm.*/libgalera_smm.so/
-SELECT @@global.wsrep_provider;
-SELECT @@global.wsrep_slave_threads;
SELECT @@global.wsrep_cluster_address;
SELECT @@global.wsrep_on;
SHOW STATUS LIKE 'threads_connected';
SHOW STATUS LIKE 'wsrep_thread_count';
---echo
-
---disable_query_log
-eval SET GLOBAL wsrep_provider= '$WSREP_PROVIDER';
---enable_query_log
-
---replace_regex /.*libgalera_smm.*/libgalera_smm.so/
-SELECT @@global.wsrep_provider;
-SELECT @@global.wsrep_cluster_address;
-SELECT @@global.wsrep_on;
-SHOW STATUS LIKE 'threads_connected';
-SHOW STATUS LIKE 'wsrep_thread_count';
---echo
-
---echo # Setting wsrep_cluster_address triggers the creation of
---echo # applier/rollbacker threads.
-SET GLOBAL wsrep_cluster_address= 'gcomm://';
-
---echo # Wait for applier thread to get created 1.
---let $wait_condition = SELECT VARIABLE_VALUE = 1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_applier_thread_count';
---source include/wait_condition.inc
---echo # Wait for applier thread to get created 2.
---let $wait_condition = SELECT VARIABLE_VALUE = 1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_rollbacker_thread_count';
---source include/wait_condition.inc
-
-SELECT VARIABLE_VALUE AS EXPECT_1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_applier_thread_count';
-SELECT VARIABLE_VALUE AS EXPECT_1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_rollbacker_thread_count';
-SELECT VARIABLE_VALUE AS EXPECT_2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_thread_count';
-
---replace_regex /.*libgalera_smm.*/libgalera_smm.so/
-SELECT @@global.wsrep_provider;
-SELECT @@global.wsrep_cluster_address;
-SELECT @@global.wsrep_on;
-SHOW STATUS LIKE 'threads_connected';
-SHOW STATUS LIKE 'wsrep_thread_count';
---echo
-
-SET @wsrep_slave_threads_saved= @@global.wsrep_slave_threads;
-SET GLOBAL wsrep_slave_threads= 10;
-
---echo # Wait for 9 applier threads to get created.
---let $wait_condition = SELECT VARIABLE_VALUE = 10 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_applier_thread_count';
---source include/wait_condition.inc
-
-SELECT VARIABLE_VALUE AS EXPECT_10 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_applier_thread_count';
-SELECT VARIABLE_VALUE AS EXPECT_1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_rollbacker_thread_count';
-SELECT VARIABLE_VALUE AS EXPECT_11 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_thread_count';
-
-SHOW STATUS LIKE 'threads_connected';
-
-#
-# privileges for wsrep_on
-#
-set wsrep_on=0;
-set wsrep_on=1;
---source include/wait_until_connected_again.inc
-create user test@localhost;
-connect con1,localhost,test;
-set auto_increment_increment=10;
---error ER_SPECIFIC_ACCESS_DENIED_ERROR
-set wsrep_on=0;
-disconnect con1;
-connection default;
-drop user test@localhost;
-
---echo #
---echo # MDEV#6411: Setting set @@global.wsrep_sst_auth=NULL causes crash
---echo #
-SET @wsrep_sst_auth_saved= @@global.wsrep_sst_auth;
-SET @@global.wsrep_sst_auth= 'user:pass';
-SELECT @@global.wsrep_sst_auth;
-SET @@global.wsrep_sst_auth= '';
-SELECT @@global.wsrep_sst_auth;
-SET @@global.wsrep_sst_auth= NULL;
-SELECT @@global.wsrep_sst_auth;
-SET @@global.wsrep_sst_auth= @wsrep_sst_auth_saved;
-
-# Reset (for mtr internal checks)
-
---disable_query_log
-SET GLOBAL wsrep_slave_threads= @wsrep_slave_threads_saved;
-eval SET GLOBAL wsrep_provider= '$WSREP_PROVIDER';
-SET GLOBAL wsrep_cluster_address= @wsrep_cluster_address_saved;
-SET GLOBAL wsrep_provider_options= @wsrep_provider_options_saved;
---enable_query_log
---source include/galera_wait_ready.inc
+--echo # variables
---echo # End of test.
+SELECT COUNT(*) AS EXPECT_49 FROM INFORMATION_SCHEMA.SESSION_VARIABLES WHERE VARIABLE_NAME LIKE "wsrep%";
+SELECT COUNT(*) AS EXPECT_49 FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME LIKE "wsrep%";
+SELECT VARIABLE_NAME FROM INFORMATION_SCHEMA.SESSION_VARIABLES WHERE VARIABLE_NAME LIKE "wsrep%" ORDER BY VARIABLE_NAME;
+SELECT VARIABLE_NAME FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME LIKE "wsrep%" ORDER BY VARIABLE_NAME;
diff --git a/mysql-test/suite/wsrep/t/wsrep_on_basic.opt b/mysql-test/suite/wsrep/t/wsrep_on_basic.opt
new file mode 100644
index 00000000000..9da4dd32881
--- /dev/null
+++ b/mysql-test/suite/wsrep/t/wsrep_on_basic.opt
@@ -0,0 +1 @@
+--wsrep-provider=$WSREP_PROVIDER --binlog_format=ROW --wsrep-cluster-address=gcomm://
diff --git a/mysql-test/suite/sys_vars/t/wsrep_on_basic.test b/mysql-test/suite/wsrep/t/wsrep_on_basic.test
index 229d771b5e7..98062dbec83 100644
--- a/mysql-test/suite/sys_vars/t/wsrep_on_basic.test
+++ b/mysql-test/suite/wsrep/t/wsrep_on_basic.test
@@ -1,4 +1,6 @@
--source include/have_wsrep.inc
+--source include/have_wsrep_provider.inc
+--source include/have_innodb.inc
--echo #
--echo # wsrep_on
diff --git a/mysql-test/suite/wsrep/t/wsrep_variables_no_provider.cnf b/mysql-test/suite/wsrep/t/wsrep_variables_no_provider.cnf
new file mode 100644
index 00000000000..b73146d26e7
--- /dev/null
+++ b/mysql-test/suite/wsrep/t/wsrep_variables_no_provider.cnf
@@ -0,0 +1,12 @@
+# Use default setting for mysqld processes
+!include include/default_mysqld.cnf
+
+[mysqld.1]
+wsrep-on=ON
+binlog-format=ROW
+wsrep-provider=@ENV.WSREP_PROVIDER
+wsrep-cluster-address='gcomm://'
+#galera_port=@OPT.port
+#ist_port=@OPT.port
+#sst_port=@OPT.port
+
diff --git a/mysql-test/suite/wsrep/t/wsrep_variables_no_provider.test b/mysql-test/suite/wsrep/t/wsrep_variables_no_provider.test
new file mode 100644
index 00000000000..b44c9c5ebc8
--- /dev/null
+++ b/mysql-test/suite/wsrep/t/wsrep_variables_no_provider.test
@@ -0,0 +1,38 @@
+--source include/have_wsrep.inc
+--source include/have_innodb.inc
+
+SELECT @@wsrep_on;
+
+SET @wsrep_slave_threads_global_saved = @@global.wsrep_slave_threads;
+SET @wsrep_debug_saved = @@global.wsrep_debug;
+SET @wsrep_provider_options_saved= @@global.wsrep_provider_options;
+SET @wsrep_cluster_address_saved= @@global.wsrep_cluster_address;
+
+SET GLOBAL wsrep_provider=none;
+
+--error ER_WRONG_ARGUMENTS
+SET SESSION wsrep_trx_fragment_size=DEFAULT;
+SELECT @@session.wsrep_trx_fragment_size;
+--error ER_WRONG_VALUE_FOR_VAR
+SET GLOBAL wsrep_start_position='12345678-1234-1234-1234-123456789012:100';
+SHOW WARNINGS;
+SELECT @@global.wsrep_start_position;
+SET GLOBAL wsrep_debug=1;
+SELECT @@global.wsrep_debug;
+SET GLOBAL wsrep_slave_threads=5;
+SELECT @@global.wsrep_slave_threads;
+--error ER_WRONG_ARGUMENTS
+SET GLOBAL wsrep_desync=1;
+SELECT @@global.wsrep_desync;
+--error ER_WRONG_ARGUMENTS
+SET SESSION wsrep_trx_fragment_unit='rows';
+SELECT @@session.wsrep_trx_fragment_unit;
+
+--disable_query_log
+eval SET GLOBAL wsrep_provider= '$WSREP_PROVIDER';
+SET GLOBAL wsrep_cluster_address= @wsrep_cluster_address_saved;
+SET GLOBAL wsrep_provider_options= @wsrep_provider_options_saved;
+--source include/galera_wait_ready.inc
+SET @@global.wsrep_slave_threads = @wsrep_slave_threads_global_saved;
+SET @@global.wsrep_debug = @wsrep_debug_saved;
+--enable_query_log
diff --git a/mysql-test/suite/wsrep/t/wsrep_variables_wsrep_off.cnf b/mysql-test/suite/wsrep/t/wsrep_variables_wsrep_off.cnf
new file mode 100644
index 00000000000..2e66b1ef23c
--- /dev/null
+++ b/mysql-test/suite/wsrep/t/wsrep_variables_wsrep_off.cnf
@@ -0,0 +1,12 @@
+# Use default setting for mysqld processes
+!include include/default_mysqld.cnf
+
+[mysqld]
+wsrep-on=OFF
+
+[mysqld.1]
+wsrep-on=OFF
+#galera_port=@OPT.port
+#ist_port=@OPT.port
+#sst_port=@OPT.port
+
diff --git a/mysql-test/suite/wsrep/t/wsrep_variables_wsrep_off.test b/mysql-test/suite/wsrep/t/wsrep_variables_wsrep_off.test
new file mode 100644
index 00000000000..4a9cd2bad5f
--- /dev/null
+++ b/mysql-test/suite/wsrep/t/wsrep_variables_wsrep_off.test
@@ -0,0 +1,30 @@
+--source include/have_wsrep.inc
+--source include/have_innodb.inc
+
+SELECT @@wsrep_on;
+
+SET @wsrep_slave_threads_global_saved = @@global.wsrep_slave_threads;
+SET @wsrep_debug_saved = @@global.wsrep_debug;
+
+--error ER_WRONG_ARGUMENTS
+SET SESSION wsrep_trx_fragment_size=DEFAULT;
+SELECT @@session.wsrep_trx_fragment_size;
+--error ER_WRONG_VALUE_FOR_VAR
+SET GLOBAL wsrep_start_position='12345678-1234-1234-1234-123456789012:100';
+SHOW WARNINGS;
+SELECT @@global.wsrep_start_position;
+SET GLOBAL wsrep_debug=1;
+SELECT @@global.wsrep_debug;
+SET GLOBAL wsrep_slave_threads=5;
+SELECT @@global.wsrep_slave_threads;
+--error ER_WRONG_ARGUMENTS
+SET GLOBAL wsrep_desync=1;
+SELECT @@global.wsrep_desync;
+--error ER_WRONG_ARGUMENTS
+SET SESSION wsrep_trx_fragment_unit='rows';
+SELECT @@session.wsrep_trx_fragment_unit;
+
+--disable_query_log
+SET @@global.wsrep_slave_threads = @wsrep_slave_threads_global_saved;
+SET @@global.wsrep_debug = @wsrep_debug_saved;
+--enable_query_log
diff --git a/mysql-test/unstable-tests b/mysql-test/unstable-tests
index 30171bfe084..df4a1e74447 100644
--- a/mysql-test/unstable-tests
+++ b/mysql-test/unstable-tests
@@ -23,190 +23,174 @@
#
##############################################################################
#
-# Based on bb-10.4-release 80c951ce2875aac521b82323b5b6ebf638593445
-# Sat Oct 31 21:06:49 2020 +0100 : Merge branch '10.3' into 10.4
+# Based on bb-10.4-release d348555cd (MDEV-23328 Server hang due to Galera lock conflict resolution)
+# for main suite changes and failures, and
+# bb-10.4-release 80c951ce2875aac521b82323b5b6ebf638593445
+# for the rest
+
-main.alter_table : Modified in 10.4.16
main.alter_table_trans : MDEV-12084 - timeout
main.analyze_stmt_slow_query_log : MDEV-12237 - Wrong result
-main.aria_icp_debug : Added in 10.4.16
main.auth_named_pipe : MDEV-14724 - System error 2
-main.backup_locks : Modified in 10.4.16
+main.auto_increment_ranges_innodb : Modified in 10.4.18
main.backup_stages : MDEV-23401 - Bad file descriptor
main.binary_to_hex : MDEV-20211 - Wrong result
-main.blackhole : Modified in 10.4.16
-main.bootstrap_innodb : Added in 10.4.16
+main.check_constraint : Modified in 10.4.18
main.connect : MDEV-17282 - Wrong result
main.connect-abstract : MDEV-20162 - Could not execute 'check-testcase'
main.connect2 : MDEV-13885 - Server crash
-main.count_distinct2 : MDEV-11768 - timeout
+main.create : Modified in 10.4.18
main.create_delayed : MDEV-10605 - failed with timeout
main.create_drop_event : MDEV-16271 - Wrong result
-main.ctype_binary : MDEV-24080 - Data too long for column; include file modified in 10.4.16
-main.ctype_cp1251 : Include file modified in 10.4.16
+main.cte_nonrecursive : Modified in 10.4.18
+main.cte_nonrecursive_not_embedded : Added in 10.4.18
+main.cte_recursive : Modified in 10.4.18
+main.ctype_binary : MDEV-24080 - Data too long for column
main.ctype_cp932_binlog_stm : MDEV-20534 - Wrong result
-main.ctype_filename : Modified in 10.4.16
-main.ctype_latin1 : Include file modified in 10.4.16
-main.ctype_ucs : MDEV-17681 - Data too long for column; include file modified in 10.4.16
+main.ctype_ucs : MDEV-17681 - Data too long for column
main.ctype_upgrade : MDEV-16945 - Error upon mysql_upgrade
main.ctype_utf16 : MDEV-10675: timeout or extra warnings
main.ctype_utf16le : MDEV-10675: timeout or extra warnings
-main.ctype_utf8 : Modified in 10.4.16
-main.ctype_utf8mb4_innodb : MDEV-17744 - Timeout; MDEV-18567 - ASAN use-after-poison
+main.ctype_utf8mb4 : Modified in 10.4.18
+main.ctype_utf8mb4_heap : Include file modified in 10.4.18
+main.ctype_utf8mb4_innodb : MDEV-17744 - Timeout; MDEV-18567 - ASAN use-after-poison; include file modified in 10.4.18
+main.ctype_utf8mb4_myisam : Include file modified in 10.4.18
main.debug_sync : MDEV-10607 - internal error
main.delayed : MDEV-20961 - Assertion failure
-main.derived_cond_pushdown : MDEV-20532 - Floating point differences
-main.derived_opt : MDEV-11768 - timeout
+main.derived_cond_pushdown : MDEV-20532 - Floating point differences; modified in 10.4.18
main.dirty_close : MDEV-19368 - mysqltest failed but provided no output
main.distinct : MDEV-14194 - Crash
main.drop_bad_db_type : MDEV-15676 - Wrong result
main.dyncol : MDEV-19455 - Extra warning
-main.empty_server_name-8224 : Modified in 10.4.16
-main.errors : Modified in 10.4.16
+main.empty_string_literal : Modified in 10.4.18
main.events_2 : MDEV-13277 - Crash
main.events_bugs : MDEV-12892 - Crash
main.events_restart : MDEV-12236 - Server shutdown problem
main.events_slowlog : MDEV-12821 - Wrong result
-main.fast_prefix_index_fetch_innodb : Modified in 10.4.16
main.flush : MDEV-19368 - mysqltest failed but provided no output
main.flush_ssl : MDEV-21276 - Aria recovery failure
-main.func_gconcat : MDEV-21379 - Valgrind warnings
-main.func_json : Modified in 10.4.16
-main.func_math : MDEV-20966 - Wrong error code; modified in 10.4.16
-main.func_test : Modified in 10.4.16
+main.func_gconcat : MDEV-21379 - Valgrind warnings; modified in 10.4.18
+main.func_like : Modified in 10.4.18
+main.func_math : MDEV-20966 - Wrong error code
main.gis : MDEV-13411 - wrong result on P8
+main.gis-json : Modified in 10.4.18
main.gis_notembedded : MDEV-21264 - Wrong result with non-default charset
-main.grant : Modified in 10.4.16
-main.grant5 : Modified in 10.4.16
+main.group_by : Modified in 10.4.18
main.host_cache_size_functionality : MDEV-10606 - sporadic failure on shutdown
-main.implicit_commit : Modified in 10.4.16
main.index_intersect_innodb : MDEV-10643 - failed with timeout
main.index_merge_innodb : MDEV-7142 - Plan mismatch
-main.information_schema : Modified in 10.4.16
-main.innodb_ext_key : Modified in 10.4.16
+main.information_schema : Modified in 10.4.18
main.innodb_icp : MDEV-20168 - Wrong execution plans
-main.innodb_icp_debug : Added in 10.4.16
-main.invisible_field : Modified in 10.4.16
+main.innodb_mrr_cpk : MDEV-24737 - Server crash
main.invisible_field_grant_completely : MDEV-22254 - Syscall param write points to uninitialised bytes
main.ipv4_and_ipv6 : MDEV-20964 - Wrong result
main.ipv6 : MDEV-20964 - Wrong result
main.join_cache : MDEV-17743 - Bad address from storage engine MyISAM
-main.kill : Modified in 10.4.16
+main.kill : MDEV-24801 - Wrong errno on reap; modified in 10.4.18
main.kill-2 : MDEV-13257 - Wrong result
main.kill_processlist-6619 : MDEV-10793 - Wrong result
-main.limit_rows_examined : Modified in 10.4.16
main.loaddata : MDEV-19368 - mysqltest failed but provided no output
main.locale : MDEV-20521 - Missing warning
-main.lock_view : Added in 10.4.16
+main.lock_tables_lost_commit : MDEV-24624 - Timeout
+main.lock_user : Modified in 10.4.18
+main.lock_view : Modified in 10.4.18
main.log_slow : MDEV-13263 - Wrong result
-main.log_tables : Modified in 10.4.16
main.log_tables-big : MDEV-13408 - wrong result
main.log_tables_upgrade : MDEV-20962 - Wrong result
main.mdev-504 : MDEV-15171 - warning
main.mdev375 : MDEV-10607 - sporadic "can't connect"
main.merge : MDEV-10607 - sporadic "can't connect"
-main.multi_update_big : Modified in 10.4.16
-main.myisam_icp_debug : Added in 10.4.16
-main.myisam_repair : Added in 10.4.16
+main.myisam : Modified in 10.4.18
main.mysql : MDEV-20156 - Wrong result
main.mysql_client_test : MDEV-19369 - error: 5888, status: 23, errno: 2; MDEV-19511 - Big endian issue
main.mysql_client_test_comp : MDEV-16641 - Error in exec
main.mysql_client_test_nonblock : CONC-208 - Error on Power; MDEV-15096 - exec failed
main.mysql_cp932 : MDEV-21275 - Wrong result
-main.mysql_upgrade : MDEV-20161 - Wrong result; MDEV-20166 - FATAL ERROR: Upgrade failed; modified in 10.4.16
+main.mysql_upgrade : MDEV-20161 - Wrong result; MDEV-20166 - FATAL ERROR: Upgrade failed; modified in 10.4.18
main.mysql_upgrade-6984 : MDEV-22514 - Wrong result
main.mysql_upgrade_no_innodb : MDEV-20537 - Wrong result
main.mysql_upgrade_noengine : MDEV-14355 - Wrong result
main.mysql_upgrade_view : MDEV-20161 - Wrong result; MDEV-23392 - Wrong result
main.mysqladmin : MDEV-20535 - Wrong result
-main.mysqlbinlog_row_minimal : Modified in 10.4.16
main.mysqlcheck : MDEV-20164 - Wrong result
-main.mysqld--help : Modified in 10.4.16
main.mysqld_option_err : MDEV-21236 - Wrong error; MDEV-21571 - Crash on bootstrap
-main.mysqldump : MDEV-14800 - Stack smashing detected; modified in 10.4.16
+main.mysqldump : Modified in 10.4.18
main.mysqldump-max : MDEV-21272 - Wrong result
-main.mysqlhotcopy_myisam : MDEV-10995 - Hang on debug
+main.mysqldump-system : Modified in 10.4.18
main.mysqlshow : MDEV-20965 - Wrong result
main.mysqlslap : MDEV-11801 - timeout
main.mysqltest : MDEV-13887 - Wrong result
-main.named_pipe : Modified in 10.4.16
main.old-mode : MDEV-19373 - Wrong result
main.openssl_6975 : MDEV-17184 - Failures with OpenSSL 1.1.1
-main.order_by : Modified in 10.4.16
+main.order_by : Modified in 10.4.18
main.order_by_optimizer_innodb : MDEV-10683 - Wrong result
-main.parser : Modified in 10.4.16
-main.partition : Modified in 10.4.16
+main.parser : Modified in 10.4.18
main.partition_debug_sync : MDEV-15669 - Deadlock found when trying to get lock
main.partition_innodb : MDEV-20169 - Wrong result; MDEV-23427 - Server crash
main.partition_innodb_plugin : MDEV-12901 - Valgrind warnings
main.partition_innodb_semi_consistent : MDEV-19411 - Failed to start mysqld.1
main.plugin_auth : MDEV-20957 - Upgrade file was not properly created
main.plugin_auth_qa_2 : MDEV-20165 - Wrong result
-main.plugin_innodb : Modified in 10.4.16
-main.pool_of_threads : MDEV-18135 - SSL error: key too small; modified in 10.4.16
-main.precedence : Added in 10.4.16
-main.precedence_bugs : Added in 10.4.16
-main.processlist_notembedded : Modified in 10.4.16
+main.pool_of_threads : MDEV-18135 - SSL error: key too small
+main.precedence : Modified in 10.4.18
+main.processlist_notembedded : MDEV-23752 - Not explainable command; modified in 10.4.18
main.ps : MDEV-11017 - sporadic wrong Prepared_stmt_count
main.ps_error : MDEV-24079 - Memory not freed
-main.query_cache : MDEV-16180 - Wrong result
+main.ps_show_log : Added in 10.4.18
+main.query_cache : MDEV-16180 - Wrong result; modified in 10.4.18
main.query_cache_debug : MDEV-15281 - Query cache is disabled
-main.range : Modified in 10.4.16
+main.range : Modified in 10.4.18
main.range_innodb : MDEV-23371 - Server crash
+main.range_notembedded : Added in 10.4.18
main.range_vs_index_merge_innodb : MDEV-15283 - Server has gone away
-main.rowid_filter : Modified in 10.4.16
main.rowid_filter_innodb : MDEV-20538 - Wrong result
-main.rowid_filter_innodb_debug : Added in 10.4.16
-main.rowid_filter_myisam_debug : Added in 10.4.16
main.select : MDEV-20532 - Floating point differences
main.select_jcl6 : MDEV-20532 - Floating point differences
main.select_pkeycache : MDEV-20532 - Floating point differences
+main.set_password : Modified in 10.4.18
main.set_statement : MDEV-13183 - Wrong result
-main.set_statement_notembedded : MDEV-19414 - Wrong result; modified in 10.4.16
+main.set_statement_notembedded : MDEV-19414 - Wrong result
main.shm : MDEV-12727 - Mismatch, ERROR 2013
main.show_explain : MDEV-10674 - Wrong result code
-main.sp : MDEV-7866 - Mismatch; modified in 10.4.16
-main.sp-destruct : Modified in 10.4.16
+main.skip_grants : Modified in 10.4.18
+main.sp : MDEV-7866 - Mismatch; modified in 10.4.18
main.sp-security : MDEV-10607 - sporadic "can't connect"
+main.sp-ucs2 : Modified in 10.4.18
main.sp_notembedded : MDEV-10607 - internal error
main.ssl : MDEV-17184 - Failures with OpenSSL 1.1.1
main.ssl_7937 : MDEV-20958 - Wrong result
main.ssl_ca : MDEV-10895 - SSL connection error on Power
main.ssl_cipher : MDEV-17184 - Failures with OpenSSL 1.1.1
main.ssl_timeout : MDEV-11244 - Crash
+main.stat_tables : Modified in 10.4.18
main.stat_tables_par_innodb : MDEV-14155 - Wrong rounding
main.status : MDEV-13255 - Wrong result
main.subselect : MDEV-20551 - Valgrind failure
-main.subselect4 : Modified in 10.4.16
-main.subselect_innodb : MDEV-10614 - Wrong result; modified in 10.4.16
-main.sum_distinct-big : Modified in 10.4.16
+main.subselect4 : Modified in 10.4.18
+main.subselect_innodb : MDEV-10614 - Wrong result
+main.table_value_constr : Modified in 10.4.18
main.tc_heuristic_recover : MDEV-14189 - Wrong result
-main.temp_table_symlink : MDEV-24058 - Wrong error code; added in 10.4.16
-main.type_blob : MDEV-15195 - Wrong result; modified in 10.4.16
-main.type_date : Modified in 10.4.16
-main.type_datetime : Modified in 10.4.16
+main.temp_table_symlink : MDEV-24058 - Wrong error code
+main.type_blob : MDEV-15195 - Wrong result
main.type_datetime_hires : MDEV-10687 - Timeout
-main.type_float : MDEV-20532 - Floating point differences; modified in 10.4.16
-main.type_newdecimal : MDEV-20532 - Floating point differences; modified in 10.4.16
+main.type_float : MDEV-20532 - Floating point differences
+main.type_newdecimal : MDEV-20532 - Floating point differences
main.type_ranges : MDEV-20532 - Floating point differences
main.type_temporal_innodb : MDEV-24025 - Wrong result
-main.type_time : Modified in 10.4.16
-main.udf : Modified in 10.4.16
-main.upgrade_MDEV-19650 : Re-enabled in 10.4.16
+main.type_year : Modified in 10.4.18
+main.union : Modified in 10.4.18
+main.user_limits : Modified in 10.4.18
main.userstat : MDEV-12904 - SSL errors
-main.view : Modified in 10.4.16
+main.view : Modified in 10.4.18
main.wait_timeout : MDEV-19023 - Lost connection to MySQL server during query
-main.win : Modified in 10.4.16
-main.windows_debug : Added in 10.4.16
-main.xa : MDEV-11769 - lock wait timeout
+main.xa : MDEV-11769 - lock wait timeout; modified in 10.4.18
#-----------------------------------------------------------------------
-archive.archive-big : MDEV-20167 - Wrong error code
-archive.archive_bitfield : MDEV-11771 - table is marked as crashed
-archive.archive_symlink : MDEV-12170 - unexpected error on rmdir
-archive.discover : MDEV-10510 - Table is marked as crashed
-archive.mysqlhotcopy_archive : MDEV-10995 - Hang on debug
+archive.archive-big : MDEV-20167 - Wrong error code
+archive.archive_bitfield : MDEV-11771 - table is marked as crashed
+archive.archive_symlink : MDEV-12170 - unexpected error on rmdir
+archive.discover : MDEV-10510 - Table is marked as crashed
#-----------------------------------------------------------------------
@@ -214,56 +198,45 @@ archive-test_sql_discovery.discover : MDEV-16817 - Table marked as crashed
#-----------------------------------------------------------------------
-binlog.binlog_commit_wait : MDEV-10150 - Mismatch
-binlog.binlog_innodb : MDEV-22516 - Wrong result
-binlog.binlog_ioerr : MDEV-20159 - Assertion failure
-binlog.binlog_killed : MDEV-12925 - Wrong result
-binlog.binlog_max_extension : MDEV-19762 - Crash on shutdown
-binlog.binlog_mysqlbinlog_row : Modified in 10.4.16
-binlog.binlog_mysqlbinlog_row_frag : Modified in 10.4.16
-binlog.binlog_mysqlbinlog_row_innodb : MDEV-20530 - Binary files differ
-binlog.binlog_mysqlbinlog_row_myisam : MDEV-20530 - Binary files differ
-binlog.binlog_no_uniqfile_crash : MDEV-24078 - Server crash upon shutdown
-binlog.binlog_recover_checksum_error : Added in 10.4.16
-binlog.binlog_row_binlog : MDEV-23402 - Wrong result
-binlog.binlog_show_binlog_event_random_pos : Modified in 10.4.16
-binlog.binlog_stm_binlog : MDEV-20412 - Wrong result
-binlog.binlog_stm_mix_innodb_myisam : MDEV-24057 - Wrong result
-binlog.binlog_xa_recover : MDEV-8517 - Extra checkpoint
-binlog.flashback-largebinlog : MDEV-19764 - Out of memory
-binlog.load_data_stm_view : MDEV-16948 - Wrong result
-binlog.show_concurrent_rotate : MDEV-20215 - Wrong result
-
-#-----------------------------------------------------------------------
-
-binlog_encryption.binlog_xa_recover : MDEV-12908 - Extra checkpoint
-binlog_encryption.encrypted_master : MDEV-23637 - Assertion failure; MDEV-14201 - Extra warnings
-binlog_encryption.encrypted_master_switch_to_unencrypted : MDEV-14190 - Can't init tc log
-binlog_encryption.encrypted_slave : MDEV-18135 - SSL error: key too small
-binlog_encryption.encryption_combo : MDEV-14199 - Table is marked as crashed
-binlog_encryption.multisource : MDEV-21289 - Wrong error code
-binlog_encryption.rpl_binlog_errors : MDEV-12742 - Crash
-binlog_encryption.rpl_checksum : MDEV-16951 - Wrong result
-binlog_encryption.rpl_corruption : MDEV-20159 - Assertion failure; MDEV-20953 - Wrong error code
-binlog_encryption.rpl_gtid_basic : MDEV-16947 - Server failed to start
-binlog_encryption.rpl_incident : MDEV-21569 - mutex: LOCK_global_system_variables unlocking
-binlog_encryption.rpl_loadfile : MDEV-16645 - Timeout in include
-binlog_encryption.rpl_mixed_binlog_max_cache_size : MDEV-20956 - Incorrect checksum for freed object
-binlog_encryption.rpl_parallel : MDEV-10653 - Timeout in include
-binlog_encryption.rpl_parallel_ignored_errors : MDEV-22471 - Slave crash
-binlog_encryption.rpl_relayrotate : MDEV-15194 - Timeout
-binlog_encryption.rpl_semi_sync : MDEV-11673 - Valgrind
-binlog_encryption.rpl_skip_replication : MDEV-13571 - Unexpected warning; MDEV-20573 - Wrong result
-binlog_encryption.rpl_ssl : MDEV-14507 - Timeouts
-binlog_encryption.rpl_stm_relay_ign_space : MDEV-19375 - Test assertion failed
-binlog_encryption.rpl_temporal_format_default_to_default : MDEV-21273 - Timeout
-binlog_encryption.rpl_temporal_format_mariadb53_to_mysql56 : MDEV-20159 - Assertion failure
-binlog_encryption.rpl_typeconv : MDEV-14362 - Lost connection to MySQL server during query
-
-#-----------------------------------------------------------------------
-
-compat/oracle.parser : Modified in 10.4.16
-compat/oracle.sp-package : Modified in 10.4.16
+binlog.binlog_commit_wait : MDEV-10150 - Mismatch
+binlog.binlog_innodb : MDEV-22516 - Wrong result
+binlog.binlog_killed : MDEV-12925 - Wrong result
+binlog.binlog_max_extension : MDEV-19762 - Crash on shutdown
+binlog.binlog_mysqlbinlog_row_innodb : MDEV-20530 - Binary files differ
+binlog.binlog_mysqlbinlog_row_myisam : MDEV-20530 - Binary files differ
+binlog.binlog_no_uniqfile_crash : MDEV-24078 - Server crash upon shutdown
+binlog.binlog_row_binlog : MDEV-23402 - Wrong result
+binlog.binlog_stm_binlog : MDEV-20412 - Wrong result
+binlog.binlog_stm_mix_innodb_myisam : MDEV-24057 - Wrong result
+binlog.binlog_xa_recover : MDEV-12908 - Extra checkpoint
+binlog.flashback-largebinlog : MDEV-19764 - Out of memory
+binlog.load_data_stm_view : MDEV-16948 - Wrong result
+binlog.show_concurrent_rotate : MDEV-20215 - Wrong result
+
+#-----------------------------------------------------------------------
+
+binlog_encryption.binlog_xa_recover : MDEV-12908 - Extra checkpoint
+binlog_encryption.encrypted_master : MDEV-23637 - Assertion failure; MDEV-14201 - Extra warnings
+binlog_encryption.encrypted_master_switch_to_unencrypted : MDEV-14190 - Can't init tc log
+binlog_encryption.encrypted_slave : MDEV-18135 - SSL error: key too small
+binlog_encryption.encryption_combo : MDEV-14199 - Table is marked as crashed
+binlog_encryption.multisource : MDEV-21289 - Wrong error code
+binlog_encryption.rpl_binlog_errors : MDEV-12742 - Crash
+binlog_encryption.rpl_checksum : MDEV-16951 - Wrong result
+binlog_encryption.rpl_corruption : MDEV-20953 - Wrong error code
+binlog_encryption.rpl_gtid_basic : MDEV-16947 - Server failed to start
+binlog_encryption.rpl_incident : MDEV-21569 - mutex: LOCK_global_system_variables unlocking
+binlog_encryption.rpl_loadfile : MDEV-16645 - Timeout in include
+binlog_encryption.rpl_mixed_binlog_max_cache_size : MDEV-20956 - Incorrect checksum for freed object
+binlog_encryption.rpl_parallel : MDEV-10653 - Timeout in include
+binlog_encryption.rpl_parallel_ignored_errors : MDEV-22471 - Slave crash
+binlog_encryption.rpl_relayrotate : MDEV-15194 - Timeout
+binlog_encryption.rpl_semi_sync : MDEV-11673 - Valgrind
+binlog_encryption.rpl_skip_replication : MDEV-13571 - Unexpected warning; MDEV-20573 - Wrong result
+binlog_encryption.rpl_ssl : MDEV-14507 - Timeouts
+binlog_encryption.rpl_stm_relay_ign_space : MDEV-19375 - Test assertion failed
+binlog_encryption.rpl_temporal_format_default_to_default : MDEV-21273 - Timeout
+binlog_encryption.rpl_typeconv : MDEV-14362 - Lost connection to MySQL server during query
#-----------------------------------------------------------------------
@@ -274,7 +247,6 @@ connect.part_file : MDEV-18135 - SSL error: key too small
connect.part_table : MDEV-18135 - SSL error: key too small
connect.pivot : MDEV-14803 - Failed to discover table
connect.secure_file_priv : MDEV-18135 - SSL error: key too small
-connect.updelx : Modified in 10.4.16
connect.vcol : MDEV-12374 - Fails on Windows
connect.zip : MDEV-13884 - Wrong result
@@ -284,9 +256,7 @@ disks.disks_notembedded : MDEV-21587 - Wrong result
#-----------------------------------------------------------------------
-encryption.corrupted_during_recovery : MDEV-20159 - Assertion failure
-encryption.create_or_replace : MDEV-24081 - Lock wait timeout exceeded; modified in 10.4.16
-encryption.create_or_replace_big : Added in 10.4.16
+encryption.create_or_replace : MDEV-24081 - Lock wait timeout exceeded
encryption.debug_key_management : MDEV-13841 - Timeout
encryption.encrypt_and_grep : MDEV-13765 - Wrong result
encryption.innochecksum : MDEV-13644 - Assertion failure
@@ -298,11 +268,8 @@ encryption.innodb-first-page-read : MDEV-14356 - Timeout in wait
encryption.innodb-force-corrupt : MDEV-17286 - SSL error
encryption.innodb-missing-key : MDEV-14728 - SSL error
encryption.innodb-page_encryption : MDEV-10641 - mutex problem
-encryption.innodb-page_encryption_compression : Modified in 10.4.16
-encryption.innodb-page_encryption_log_encryption : MDEV-17339 - Crash on restart; modified in 10.4.16
+encryption.innodb-page_encryption_log_encryption : MDEV-17339 - Crash on restart
encryption.innodb-read-only : MDEV-16563 - Crash on startup
-encryption.innodb-redo-badkey : MDEV-12898 - Server hang on startup
-encryption.innodb-redo-nokeys : MDEV-20159 - Assertion failure
encryption.innodb-remove-encryption : MDEV-16493 - Timeout in wait condition
encryption.innodb-spatial-index : MDEV-13746 - Wrong result
encryption.innodb_encrypt_key_rotation_age : MDEV-19763 - Timeout
@@ -313,7 +280,6 @@ encryption.innodb_encryption : MDEV-14728 - Unable to get ce
encryption.innodb_encryption-page-compression : MDEV-12630 - crash or assertion failure
encryption.innodb_encryption_discard_import : MDEV-16116 - Wrong result
encryption.innodb_encryption_filekeys : MDEV-15673 - Timeout
-encryption.innodb_encryption_is : MDEV-12898 - Server hang on startup
encryption.innodb_encryption_row_compressed : MDEV-16113 - Crash
encryption.innodb_encryption_tables : MDEV-17339 - Crash on restart
encryption.innodb_first_page : MDEV-10689 - Crash
@@ -321,7 +287,6 @@ encryption.innodb_onlinealter_encryption : MDEV-17287 - SIGABRT on serve
encryption.innodb_scrub : MDEV-8139 - scrubbing tests need fixing (fixed in 10.5+)
encryption.innodb_scrub_background : MDEV-8139 - scrubbing tests need fixing (fixed in 10.5+)
encryption.innodb_scrub_compressed : MDEV-8139 - scrubbing tests need fixing (fixed in 10.5+)
-encryption.tempfiles_encrypted : Modified in 10.4.16
#-----------------------------------------------------------------------
@@ -338,13 +303,12 @@ engines/rr_trx.* : MDEV-10998 - Not maintained
#-----------------------------------------------------------------------
-federated.federated_bug_35333 : MDEV-13410 - Wrong result
-federated.federated_bug_585688 : MDEV-14805 - Server crash, MDEV-12907 - Valgrind
-federated.federated_innodb : MDEV-10617 - Wrong checksum
-federated.federated_partition : MDEV-10417 - Fails on Mips
-federated.federated_transactions : MDEV-10617 - Wrong checksum
-federated.federatedx : MDEV-10617 - Wrong checksum
-federated.federatedx_create_handlers : Modified in 10.4.16
+federated.federated_bug_35333 : MDEV-13410 - Wrong result
+federated.federated_bug_585688 : MDEV-14805 - Server crash, MDEV-12907 - Valgrind
+federated.federated_innodb : MDEV-10617 - Wrong checksum
+federated.federated_partition : MDEV-10417 - Fails on Mips
+federated.federated_transactions : MDEV-10617 - Wrong checksum
+federated.federatedx : MDEV-10617 - Wrong checksum
#-----------------------------------------------------------------------
@@ -371,18 +335,12 @@ galera_3nodes.* : Suite is not stable yet
#-----------------------------------------------------------------------
-gcol.gcol_keys_innodb : Include file modified in 10.4.16
-gcol.gcol_keys_myisam : Include file modified in 10.4.16
-gcol.gcol_partition_innodb : Include file modified in 10.4.16
-gcol.gcol_update : Include file modified in 10.4.16
-gcol.innodb_virtual_basic : MDEV-16950 - Failing assertion
-gcol.innodb_virtual_debug : MDEV-23404 - Server crash
-gcol.innodb_virtual_debug_purge : Include file modified in 10.4.16
-gcol.innodb_virtual_fk : MDEV-20640 - Assertion failure; modified in 10.4.16
-gcol.innodb_virtual_fk_restart : MDEV-17466 - Assertion failure
-gcol.innodb_virtual_index : Modified in 10.4.16
-gcol.innodb_virtual_purge : MDEV-22952 - Lock wait timeout; include file modified in 10.4.16
-gcol.main_alter_table : MDEV-23403 - Wrong result
+gcol.innodb_virtual_basic : MDEV-16950 - Failing assertion
+gcol.innodb_virtual_debug : MDEV-23111 - Server crash
+gcol.innodb_virtual_fk : MDEV-20640 - Assertion failure
+gcol.innodb_virtual_fk_restart : MDEV-17466 - Assertion failure
+gcol.innodb_virtual_purge : MDEV-22952 - Lock wait timeout
+gcol.main_alter_table : MDEV-23403 - Wrong result
#-----------------------------------------------------------------------
@@ -390,31 +348,21 @@ innodb.101_compatibility : MDEV-13891 - Wrong result
innodb.alter_copy : MDEV-16181 - Assertion failure
innodb.alter_crash : MDEV-16944 - The process cannot access the file
innodb.alter_large_dml : MDEV-20148 - Debug sync point wait timed out
-innodb.alter_table : Modified in 10.4.16
innodb.binlog_consistent : MDEV-10618 - Server fails to start
innodb.blob-crash : MDEV-20481 - Crash during recovery
-innodb.create-index : MDEV-20159 - Assertion failure
-innodb.default_row_format_compatibility : MDEV-20159 - Assertion failure
innodb.doublewrite : MDEV-12905 - Server crash
-innodb.foreign-keys : Modified in 10.4.16
-innodb.foreign_key : Modified in 10.4.16
innodb.group_commit_crash : MDEV-14191 - InnoDB registration failed
innodb.group_commit_crash_no_optimize_thread : MDEV-11770 - Checksum mismatch
innodb.ibuf_not_empty : MDEV-19021 - Wrong result
-innodb.innodb : Modified in 10.4.16
innodb.innodb-32k-crash : MDEV-20194 - Extra warnings
-innodb.innodb-64k : Modified in 10.4.16
innodb.innodb-64k-crash : MDEV-13872 - Failure and crash on startup
innodb.innodb-alter-debug : MDEV-13182 - InnoDB: adjusting FSP_SPACE_FLAGS
innodb.innodb-alter-table : MDEV-10619 - Testcase timeout
innodb.innodb-bigblob : MDEV-18655 - ASAN unknown crash
innodb.innodb-blob : MDEV-12053 - Client crash
innodb.innodb-change-buffer-recovery : MDEV-19115 - Lost connection to MySQL server during query
-innodb.innodb-dict : MDEV-20159 - Assertion failure
innodb.innodb-fk : MDEV-13832 - Assertion failure on shutdown
innodb.innodb-get-fk : MDEV-13276 - Server crash
-innodb.innodb-index : Include file modified in 10.4.16
-innodb.innodb-index-debug : Include file modified in 10.4.16
innodb.innodb-index-online : MDEV-14809 - Cannot save statistics
innodb.innodb-page_compression_default : MDEV-13644 - Assertion failure
innodb.innodb-page_compression_lzma : MDEV-14353 - Wrong result
@@ -422,15 +370,14 @@ innodb.innodb-page_compression_snappy : MDEV-13644 - Assertion failure
innodb.innodb-page_compression_tables : MDEV-13644 - Assertion failure
innodb.innodb-page_compression_zip : MDEV-10641 - mutex problem
innodb.innodb-table-online : MDEV-13894 - Wrong result
-innodb.innodb-timeout : MDEV-20159 - Assertion failure
+innodb.innodb-ucs2 : MDEV-24505 - Assertion failure
innodb.innodb-wl5522 : MDEV-13644 - Assertion failure
innodb.innodb-wl5522-1 : MDEV-22945 - Server crash
innodb.innodb-wl5522-debug : MDEV-14200 - Wrong errno
innodb.innodb_buffer_pool_dump_pct : MDEV-20139 - Timeout in wait_condition.inc
-innodb.innodb_buffer_pool_resize : MDEV-16964 - Assertion failure
+innodb.innodb_buffer_pool_resize : MDEV-23637 - Assertion failure
innodb.innodb_buffer_pool_resize_debug : MDEV-22515 - Timeout in wait_condition
-innodb.innodb_buffer_pool_resize_with_chunks : MDEV-16964 - Assertion failure
-innodb.innodb_bug14147491 : MDEV-11808 - Index is corrupt
+innodb.innodb_buffer_pool_resize_with_chunks : MDEV-23637 - Assertion failure
innodb.innodb_bug30423 : MDEV-7311 - Wrong result
innodb.innodb_bug47167 : MDEV-20524 - Table 'user' is marked as crashed and should be repaired
innodb.innodb_bug48024 : MDEV-14352 - Assertion failure
@@ -440,50 +387,36 @@ innodb.innodb_force_recovery_rollback : MDEV-22889 - Wrong result
innodb.innodb_information_schema : MDEV-8851 - Wrong result
innodb.innodb_max_recordsize_32k : MDEV-14801 - Operation failed
innodb.innodb_max_recordsize_64k : MDEV-15203 - Wrong result
-innodb.innodb_monitor : MDEV-10939 - Testcase timeout
innodb.innodb_mysql : MDEV-19873 - Wrong result
innodb.innodb_simulate_comp_failures_small : MDEV-20526 - ASAN use-after-poison
innodb.innodb_stats : MDEV-10682 - wrong result
-innodb.innodb_stats_drop_locked : Modified in 10.4.16
innodb.innodb_stats_persistent : MDEV-21567 - Wrong result in execution plan
innodb.innodb_stats_persistent_debug : MDEV-14801 - Operation failed
innodb.innodb_sys_semaphore_waits : MDEV-10331 - Semaphore wait
-innodb.innodb_trx_weight : Configuration deleted in 10.4.16
innodb.innodb_zip_innochecksum2 : MDEV-13882 - Warning: difficult to find free blocks
-innodb.instant_alter_bugs : Modified in 10.4.16
-innodb.instant_alter_crash : Modified in 10.4.16
innodb.instant_alter_extend : MDEV-20963 - Binary files differ
-innodb.instant_alter_index_rename : Modified in 10.4.16
-innodb.instant_alter_purge : Modified in 10.4.16
innodb.log_corruption : MDEV-13251 - Wrong result
innodb.log_data_file_size : MDEV-14204 - Server failed to start; MDEV-20648 - Assertion failure
-innodb.log_file : MDEV-20159 - Assertion failure
innodb.log_file_name : MDEV-14193 - Exception
innodb.log_file_size : MDEV-15668 - Not found pattern
innodb.monitor : MDEV-16179 - Wrong result
-innodb.page_id_innochecksum : MDEV-20159 - Assertion failure
innodb.purge_secondary : MDEV-15681 - Wrong result
innodb.purge_thread_shutdown : MDEV-13792 - Wrong result
innodb.read_only_recovery : MDEV-13886 - Server crash
innodb.recovery_shutdown : MDEV-15671 - Checksum mismatch in datafile
-innodb.row_format_redundant : MDEV-15192 - Trying to access missing tablespace; modified in 10.4.16
-innodb.stats_persistent : Added in 10.4.16
+innodb.row_format_redundant : MDEV-15192 - Trying to access missing tablespace
innodb.table_definition_cache_debug : MDEV-14206 - Extra warning
-innodb.table_flags : MDEV-13572 - Wrong result; MDEV-19374 - Server failed to start; modified in 10.4.16
+innodb.table_flags : MDEV-13572 - Wrong result; MDEV-19374 - Server failed to start
innodb.temp_table_savepoint : MDEV-24077 - Assertion failure
innodb.temporary_table : MDEV-13265 - Wrong result
-innodb.truncate : Modified in 10.4.16
innodb.undo_truncate : MDEV-17340 - Server hung; MDEV-20840 - Sporadic timeout
innodb.undo_truncate_recover : MDEV-17679 - Server has gone away; MDEV-19200 - Shutdown fails
-innodb.update-cascade : Combinations added in 10.4.16
innodb.update_time : MDEV-14804 - Wrong result
innodb.xa_recovery : MDEV-15279 - mysqld got exception
#-----------------------------------------------------------------------
-innodb_fts.basic : Modified in 10.4.16
innodb_fts.fulltext2 : MDEV-24074 - Server crash
-innodb_fts.innodb_fts_misc_1 : Modified in 10.4.16
innodb_fts.innodb_fts_misc_debug : MDEV-14156 - Unexpected warning
innodb_fts.innodb_fts_plugin : MDEV-13888 - Errors in server log
innodb_fts.innodb_fts_stopword_charset : MDEV-13259 - Table crashed
@@ -493,20 +426,18 @@ innodb_fts.sync_ddl : MDEV-21568 - Errno: 2000; MDEV-18654 -
innodb_gis.alter_spatial_index : MDEV-13745 - Server crash
innodb_gis.gis_split_nan : MDEV-21678 - Cannot get geometry object
-innodb_gis.rtree_add_index : Include file modified in 10.4.16
-innodb_gis.rtree_compress : Include file modified in 10.4.16
innodb_gis.rtree_compress2 : MDEV-16269 - Wrong result
innodb_gis.rtree_concurrent_srch : MDEV-15284 - Wrong result with embedded
-innodb_gis.rtree_purge : MDEV-15275 - Timeout; include file modified in 10.4.16
+innodb_gis.rtree_purge : MDEV-15275 - Timeout
innodb_gis.rtree_recovery : MDEV-15274 - Error on check
innodb_gis.rtree_split : MDEV-14208 - Too many arguments
-innodb_gis.rtree_undo : MDEV-14456 - Timeout in include file; include file modified in 10.4.16
+innodb_gis.rtree_undo : MDEV-14456 - Timeout in include file
innodb_gis.types : MDEV-15679 - Table is marked as crashed
#-----------------------------------------------------------------------
innodb_zip.cmp_per_index : MDEV-14490 - Table is marked as crashed
-innodb_zip.create_options : MDEV-21329 - Assertion failure; MDEV-24076 - Assertion failure
+innodb_zip.create_options : MDEV-24076 - Assertion failure
innodb_zip.index_large_prefix_4k : MDEV-21679 - Row size too large
innodb_zip.innochecksum : MDEV-14486 - Server failed to shut down
innodb_zip.innochecksum_3 : MDEV-13279 - Extra warnings
@@ -518,39 +449,32 @@ innodb_zip.wl6501_scale_1 : MDEV-13254 - Timeout, MDEV-14104 - Error 192
#-----------------------------------------------------------------------
-maria.alter : Modified in 10.4.16
-maria.create : Modified in 10.4.16
maria.insert_select : MDEV-12757 - Timeout
maria.insert_select-7314 : MDEV-16492 - Timeout
maria.maria : MDEV-14430 - Extra warning
-maria.maria-no-logging : MDEV-20196 - Crash on shutdown or server can't start; modified in 10.4.16
-
-#-----------------------------------------------------------------------
-
-mariabackup.absolute_ibdata_paths : MDEV-16571 - Wrong result
-mariabackup.apply-log-only : MDEV-20135 - Timeout
-mariabackup.backup_ssl : MDEV-24073 - Server crash upon shutdown
-mariabackup.create_with_data_directory_during_backup : MDEV-20159 - Assertion failure
-mariabackup.data_directory : MDEV-15270 - Error on exec
-mariabackup.ddl_incremental_encrypted : Added in 10.4.16
-mariabackup.full_backup : MDEV-16571 - Wrong result
-mariabackup.huge_lsn : MDEV-18569 - Table doesn't exist
-mariabackup.incremental_backup : MDEV-21222 - Memory allocation failure
-mariabackup.incremental_ddl_during_backup : Modified in 10.4.16
-mariabackup.incremental_encrypted : MDEV-15667 - timeout
-mariabackup.incremental_rocksdb : MDEV-20954 - Cannot access the file
-mariabackup.innodb_redo_overwrite : MDEV-24023 - Wrong result; added in 10.4.16
-mariabackup.log_checksum_mismatch : MDEV-16571 - Wrong result
-mariabackup.mdev-14447 : MDEV-15201 - Timeout
-mariabackup.mlog_index_load : Modified in 10.4.16
-mariabackup.partial_exclude : MDEV-15270 - Error on exec
-mariabackup.rpl_slave_info : Added in 10.4.16
-mariabackup.unencrypted_page_compressed : MDEV-18653 - Wrong error; include file modified in 10.4.16
-mariabackup.xb_compressed_encrypted : MDEV-14812 - Segmentation fault
-mariabackup.xb_file_key_management : MDEV-16571 - Wrong result
-mariabackup.xb_page_compress : MDEV-14810 - status: 1, errno: 11
-mariabackup.xb_partition : MDEV-17584 - Crash upon shutdown
-mariabackup.xb_rocksdb : MDEV-17338 - Server hung on shutdown
+maria.maria-no-logging : MDEV-20196 - Crash on shutdown or server can't start
+
+#-----------------------------------------------------------------------
+
+mariabackup.absolute_ibdata_paths : MDEV-16571 - Wrong result
+mariabackup.apply-log-only : MDEV-20135 - Timeout
+mariabackup.backup_ssl : MDEV-24073 - Server crash upon shutdown
+mariabackup.data_directory : MDEV-15270 - Error on exec
+mariabackup.full_backup : MDEV-16571 - Wrong result
+mariabackup.huge_lsn : MDEV-18569 - Table doesn't exist
+mariabackup.incremental_backup : MDEV-21222 - Memory allocation failure
+mariabackup.incremental_encrypted : MDEV-15667 - timeout
+mariabackup.incremental_rocksdb : MDEV-20954 - Cannot access the file
+mariabackup.innodb_redo_overwrite : MDEV-24023 - Wrong result
+mariabackup.log_checksum_mismatch : MDEV-16571 - Wrong result
+mariabackup.mdev-14447 : MDEV-15201 - Timeout
+mariabackup.partial_exclude : MDEV-15270 - Error on exec
+mariabackup.unencrypted_page_compressed : MDEV-18653 - Wrong error
+mariabackup.xb_compressed_encrypted : MDEV-14812 - Segmentation fault
+mariabackup.xb_file_key_management : MDEV-16571 - Wrong result
+mariabackup.xb_page_compress : MDEV-14810 - status: 1, errno: 11
+mariabackup.xb_partition : MDEV-17584 - Crash upon shutdown
+mariabackup.xb_rocksdb : MDEV-17338 - Server hung on shutdown
#-----------------------------------------------------------------------
@@ -575,8 +499,6 @@ multi_source.load_data : MDEV-21235 - Slave crash
multi_source.mdev-8874 : MDEV-19415 - AddressSanitizer: heap-use-after-free
multi_source.mdev-9544 : MDEV-19415 - AddressSanitizer: heap-use-after-free
multi_source.multisource : MDEV-10417 - Fails on Mips
-multi_source.reset_slave : MDEV-10690 - Wrong result
-multi_source.simple : MDEV-4633 - Wrong result
multi_source.status_vars : MDEV-4632 - failed while waiting for Slave_received_heartbeats
#-----------------------------------------------------------------------
@@ -596,7 +518,6 @@ parts.partition_debug_innodb : MDEV-10891 - Can't create UNIX socket;
parts.partition_exch_qa_10 : MDEV-11765 - wrong result
parts.partition_innodb_status_file : MDEV-12901 - Valgrind
parts.partition_special_innodb : MDEV-16942 - Timeout
-parts.reorganize : Added in 10.4.16
#-----------------------------------------------------------------------
@@ -635,16 +556,10 @@ perfschema_stress.* : MDEV-10996 - Not maintained
#-----------------------------------------------------------------------
-period.delete : Modified in 10.4.16
-period.update : Modified in 10.4.16
-period.versioning : MDEV-20159 - Assertion failure
-
-#-----------------------------------------------------------------------
-
plugins.feedback_plugin_send : MDEV-7932, MDEV-11118 - Connection problems and such
plugins.multiauth : MDEV-20163 - Plugin could not be loaded
plugins.processlist : MDEV-16574 - Wrong result
-plugins.server_audit : MDEV-14295 - Wrong result; modified in 10.4.16
+plugins.server_audit : MDEV-14295 - Wrong result
plugins.thread_pool_server_audit : MDEV-14295 - Wrong result
#-----------------------------------------------------------------------
@@ -684,124 +599,105 @@ rocksdb_sys_vars.rocksdb_rate_limiter_bytes_per_sec_basic : MDEV-16639 - Crash
#-----------------------------------------------------------------------
-roles.acl_load_mutex-5170 : Modified in 10.4.16
roles.create_and_grant_role : MDEV-11772 - wrong result
#-----------------------------------------------------------------------
-rpl.circular_serverid0 : MDEV-19372 - ASAN heap-use-after-free
-rpl.create_or_replace2 : MDEV-19412 - Lost connection to MySQL server
-rpl.create_or_replace_mix : MDEV-20523 - Wrong result
-rpl.create_or_replace_statement : MDEV-20523 - Wrong result
-rpl.create_select : MDEV-14121 - Assertion failure
-rpl.last_insert_id : MDEV-10625 - warnings in error log
-rpl.rpl_auto_increment : MDEV-10417 - Fails on Mips
-rpl.rpl_auto_increment_bug45679 : MDEV-10417 - Fails on Mips
-rpl.rpl_auto_increment_update_failure : MDEV-10625 - warnings in error log
-rpl.rpl_binlog_errors : MDEV-12742 - Crash
-rpl.rpl_binlog_grant : MDEV-21274 - Lost connection at handshake
-rpl.rpl_binlog_index : Modified in 10.4.16
-rpl.rpl_cant_read_event_incident : MDEV-20960 - Abort on shutdown
-rpl.rpl_checksum_cache : MDEV-22510 - Server crash
-rpl.rpl_circular_for_4_hosts : MDEV-20536 - Server crash
-rpl.rpl_colSize : MDEV-16112 - Server crash
-rpl.rpl_corruption : MDEV-20527 - Slave stopped with wrong error code
-rpl.rpl_create_tmp_table_if_not_exists : MDEV-20159 - Assertion failure
-rpl.rpl_ctype_latin1 : MDEV-14813 - Wrong result on Mac
-rpl.rpl_ddl : MDEV-10417 - Fails on Mips
-rpl.rpl_domain_id_filter_io_crash : MDEV-12729 - Timeout in include file, MDEV-13677 - Server crash
-rpl.rpl_domain_id_filter_master_crash : MDEV-19043 - Table marked as crashed
-rpl.rpl_domain_id_filter_restart : MDEV-10684 - Wrong result; MDEV-19043 - Table marked as crashed
-rpl.rpl_drop_db_fail : MDEV-16898 - Slave fails to start
-rpl.rpl_extra_col_master_innodb : MDEV-16570 - Extra warning
-rpl.rpl_extra_col_master_myisam : MDEV-23372 - Extra warning
-rpl.rpl_filter_tables_dynamic : Modified in 10.4.16
-rpl.rpl_filter_wild_tables_dynamic : Modified in 10.4.16
-rpl.rpl_flushlog_loop : MDEV-21570 - Server crash
-rpl.rpl_get_lock : MDEV-19368 - mysqltest failed but provided no output
-rpl.rpl_gtid_basic : MDEV-10681 - server startup problem
-rpl.rpl_gtid_crash : MDEV-13643 - Lost connection; modified in 10.4.16
-rpl.rpl_gtid_delete_domain : MDEV-14463 - Timeout; MDEV-23103 - Could not delete gtid domain; modified in 10.4.16
-rpl.rpl_gtid_errorhandling : MDEV-13261 - Crash
-rpl.rpl_gtid_mdev9033 : MDEV-10680 - warnings
-rpl.rpl_gtid_reconnect : MDEV-14497 - Crash
-rpl.rpl_gtid_startpos : MDEV-20141 - mysqltest failed but provided no output
-rpl.rpl_gtid_stop_start : MDEV-10629 - Crash on shutdown, MDEV-12629 - Valgrind warnings
-rpl.rpl_gtid_until : MDEV-10625 - warnings in error log
-rpl.rpl_ignore_grant : MDEV-20159 - Assertion failure
-rpl.rpl_ignore_table_update : MDEV-20159 - Assertion failure
-rpl.rpl_innodb_bug30888 : MDEV-10417 - Fails on Mips
-rpl.rpl_insert : MDEV-9329 - Fails on Ubuntu/s390x
-rpl.rpl_insert_delayed : MDEV-9329 - Fails on Ubuntu/s390x
-rpl.rpl_insert_id : MDEV-15197 - Wrong result
-rpl.rpl_insert_id_pk : MDEV-16567 - Assertion failure
-rpl.rpl_insert_ignore : MDEV-14365 - Lost connection to MySQL server during query
-rpl.rpl_invoked_features : MDEV-10417 - Fails on Mips
-rpl.rpl_ipv4_as_ipv6 : MDEV-20147 - Incorrect checksum for freed object
-rpl.rpl_mariadb_slave_capability : MDEV-11018 - Extra lines in binlog
-rpl.rpl_mdev12179 : MDEV-19043 - Table marked as crashed
-rpl.rpl_mdev6020 : MDEV-23426 - Server crash, ASAN failures; MDEV-15272 - Server crash
-rpl.rpl_mixed_mixing_engines : MDEV-21266 - Timeout
-rpl.rpl_mysql_upgrade : Modified in 10.4.16
-rpl.rpl_non_direct_row_mixing_engines : MDEV-16561 - Timeout in master_pos_wait
-rpl.rpl_old_master : MDEV-22956 - Failing assertion
-rpl.rpl_parallel : MDEV-10653 - Timeouts
-rpl.rpl_parallel2 : MDEV-17390 - Operation cannot be performed
-rpl.rpl_parallel_conflicts : MDEV-15272 - Server crash
-rpl.rpl_parallel_mdev6589 : MDEV-12979 - Assertion failure
-rpl.rpl_parallel_multilevel : MDEV-20160 - Server crash
-rpl.rpl_parallel_multilevel2 : MDEV-14723 - Timeout
-rpl.rpl_parallel_optimistic : MDEV-15278 - Failed to sync with master
-rpl.rpl_parallel_optimistic_nobinlog : MDEV-15278 - Failed to sync with master
-rpl.rpl_parallel_optimistic_until : MDEV-23021 - Query didn't return a result set
-rpl.rpl_parallel_retry : MDEV-11119 - Crash; MDEV-17109 - Timeout; modified in 10.4.16
-rpl.rpl_parallel_temptable : MDEV-10356 - Crash; MDEV-19076 - Wrong result
-rpl.rpl_partition_innodb : MDEV-10417 - Fails on Mips
-rpl.rpl_password_boundaries : MDEV-11534 - Slave IO warnings
-rpl.rpl_read_only : MDEV-20159 - Assertion failure
-rpl.rpl_rewrt_db : MDEV-24060 - Server did not start
-rpl.rpl_row_001 : MDEV-16653 - MTR's internal check fails
-rpl.rpl_row_basic_11bugs : MDEV-12171 - Server failed to start
-rpl.rpl_row_basic_2myisam : MDEV-13875 - command "diff_files" failed
-rpl.rpl_row_corruption : MDEV-21569 - mutex: LOCK_global_system_variables unlocking
-rpl.rpl_row_drop_create_temp_table : MDEV-14487 - Wrong result
-rpl.rpl_row_end_of_statement_loss : MDEV-21237 - Server crash
-rpl.rpl_row_img_blobs : MDEV-13875 - command "diff_files" failed
-rpl.rpl_row_img_eng_min : MDEV-13875 - diff_files failed
-rpl.rpl_row_img_eng_noblob : MDEV-13875 - command "diff_files" failed
-rpl.rpl_row_index_choice : MDEV-15196 - Slave crash
-rpl.rpl_row_sp001 : MDEV-9329 - Fails on Ubuntu/s390x
-rpl.rpl_row_until : MDEV-14052 - Master will not send events with checksum
-rpl.rpl_semi_sync : MDEV-11220 - Wrong result
-rpl.rpl_semi_sync_after_sync : MDEV-14366 - Wrong result
-rpl.rpl_semi_sync_after_sync_row : MDEV-14366 - Wrong result
-rpl.rpl_semi_sync_event_after_sync : MDEV-11806 - warnings
-rpl.rpl_semi_sync_skip_repl : MDEV-23371 - Server crash
-rpl.rpl_semi_sync_uninstall_plugin : MDEV-7140 - Assorted failures
-rpl.rpl_semi_sync_wait_no_slave : MDEV-20159 - Assertion failure
-rpl.rpl_semi_sync_wait_point : MDEV-11807 - timeout in wait condition
-rpl.rpl_semisync_ali_issues : MDEV-16272 - Wrong result
-rpl.rpl_show_slave_hosts : MDEV-10681 - Crash
-rpl.rpl_shutdown_wait_slaves : MDEV-22517 - Timeout on sync_with_master
-rpl.rpl_skip_replication : MDEV-23372 - Extra warning
-rpl.rpl_slave_grp_exec : MDEV-10514 - Deadlock; re-enabled in 10.4.16; modified in 10.4.16
-rpl.rpl_slave_load_in : MDEV-20159 - Assertion failure
-rpl.rpl_slave_load_tmpdir_not_exist : MDEV-23372 - Extra warning
-rpl.rpl_slow_query_log : MDEV-13250 - Test abort
-rpl.rpl_sp_effects : MDEV-13249 - Crash
-rpl.rpl_start_stop_slave : MDEV-13567 - Sync slave timeout; modified in 10.4.16
-rpl.rpl_stm_relay_ign_space : MDEV-14360 - Test assertion
-rpl.rpl_stm_stop_middle_group : MDEV-13791 - Server crash
-rpl.rpl_sync : MDEV-10633 - Database page corruption
-rpl.rpl_temporary_error2 : MDEV-10634 - Wrong number of retries
-rpl.rpl_test_framework : MDEV-19368 - mysqltest failed but provided no output
-rpl.rpl_trigger : MDEV-18055 - Wrong result
-rpl.rpl_truncate_3innodb : MDEV-19454 - Syntax error
-rpl.rpl_upgrade_master_info : MDEV-16567 - Assertion failure
-rpl.rpl_user_variables : MDEV-20522 - Wrong result
-rpl.rpl_variables : MDEV-20150 - Server crash
-rpl.sec_behind_master-5114 : MDEV-13878 - Wrong result
-rpl.show_status_stop_slave_race-7126 : Modified in 10.4.16
+rpl.circular_serverid0 : MDEV-19372 - ASAN heap-use-after-free
+rpl.create_or_replace2 : MDEV-19412 - Lost connection to MySQL server
+rpl.create_or_replace_mix : MDEV-20523 - Wrong result
+rpl.create_or_replace_statement : MDEV-20523 - Wrong result
+rpl.create_select : MDEV-14121 - Assertion failure
+rpl.last_insert_id : MDEV-10625 - warnings in error log
+rpl.rpl_auto_increment : MDEV-10417 - Fails on Mips
+rpl.rpl_auto_increment_bug45679 : MDEV-10417 - Fails on Mips
+rpl.rpl_auto_increment_update_failure : MDEV-10625 - warnings in error log
+rpl.rpl_binlog_errors : MDEV-12742 - Crash
+rpl.rpl_binlog_grant : MDEV-21274 - Lost connection at handshake
+rpl.rpl_cant_read_event_incident : MDEV-20960 - Abort on shutdown
+rpl.rpl_checksum_cache : MDEV-22510 - Server crash
+rpl.rpl_circular_for_4_hosts : MDEV-20536 - Server crash
+rpl.rpl_colSize : MDEV-16112 - Server crash
+rpl.rpl_corruption : MDEV-20527 - Slave stopped with wrong error code
+rpl.rpl_ctype_latin1 : MDEV-14813 - Wrong result on Mac
+rpl.rpl_ddl : MDEV-10417 - Fails on Mips
+rpl.rpl_domain_id_filter_io_crash : MDEV-12729 - Timeout in include file, MDEV-13677 - Server crash
+rpl.rpl_domain_id_filter_master_crash : MDEV-19043 - Table marked as crashed
+rpl.rpl_domain_id_filter_restart : MDEV-10684 - Wrong result; MDEV-19043 - Table marked as crashed
+rpl.rpl_drop_db_fail : MDEV-16898 - Slave fails to start
+rpl.rpl_extra_col_master_innodb : MDEV-16570 - Extra warning
+rpl.rpl_extra_col_master_myisam : MDEV-23372 - Extra warning
+rpl.rpl_flushlog_loop : MDEV-21570 - Server crash
+rpl.rpl_get_lock : MDEV-19368 - mysqltest failed but provided no output
+rpl.rpl_gtid_basic : MDEV-10681 - server startup problem
+rpl.rpl_gtid_crash : MDEV-13643 - Lost connection
+rpl.rpl_gtid_errorhandling : MDEV-13261 - Crash
+rpl.rpl_gtid_mdev9033 : MDEV-10680 - warnings
+rpl.rpl_gtid_reconnect : MDEV-14497 - Crash
+rpl.rpl_gtid_startpos : MDEV-20141 - mysqltest failed but provided no output
+rpl.rpl_gtid_stop_start : MDEV-10629 - Crash on shutdown, MDEV-12629 - Valgrind warnings
+rpl.rpl_gtid_until : MDEV-10625 - warnings in error log
+rpl.rpl_innodb_bug30888 : MDEV-10417 - Fails on Mips
+rpl.rpl_insert : MDEV-9329 - Fails on Ubuntu/s390x
+rpl.rpl_insert_delayed : MDEV-9329 - Fails on Ubuntu/s390x
+rpl.rpl_insert_id : MDEV-15197 - Wrong result
+rpl.rpl_insert_id_pk : MDEV-16567 - Assertion failure
+rpl.rpl_insert_ignore : MDEV-14365 - Lost connection to MySQL server during query
+rpl.rpl_invoked_features : MDEV-10417 - Fails on Mips
+rpl.rpl_ipv4_as_ipv6 : MDEV-20147 - Incorrect checksum for freed object
+rpl.rpl_mariadb_slave_capability : MDEV-11018 - Extra lines in binlog
+rpl.rpl_mdev12179 : MDEV-19043 - Table marked as crashed
+rpl.rpl_mdev6020 : MDEV-23426 - Server crash, ASAN failures; MDEV-15272 - Server crash
+rpl.rpl_mixed_mixing_engines : MDEV-21266 - Timeout
+rpl.rpl_non_direct_row_mixing_engines : MDEV-16561 - Timeout in master_pos_wait
+rpl.rpl_old_master : MDEV-22956 - Failing assertion
+rpl.rpl_parallel : MDEV-10653 - Timeouts; MDEV-24110 - Slave crash
+rpl.rpl_parallel_conflicts : MDEV-15272 - Server crash
+rpl.rpl_parallel_multilevel : MDEV-20160 - Server crash
+rpl.rpl_parallel_multilevel2 : MDEV-14723 - Timeout
+rpl.rpl_parallel_optimistic : MDEV-15278 - Failed to sync with master
+rpl.rpl_parallel_optimistic_nobinlog : MDEV-15278 - Failed to sync with master
+rpl.rpl_parallel_optimistic_until : MDEV-23021 - Query didn't return a result set
+rpl.rpl_parallel_retry : MDEV-11119 - Crash; MDEV-17109 - Timeout
+rpl.rpl_parallel_stop_on_con_kill : MDEV-24110 - Slave crash
+rpl.rpl_parallel_temptable : MDEV-10356 - Crash; MDEV-19076 - Wrong result
+rpl.rpl_partition_innodb : MDEV-10417 - Fails on Mips
+rpl.rpl_password_boundaries : MDEV-11534 - Slave IO warnings
+rpl.rpl_rewrt_db : MDEV-24060 - Server did not start
+rpl.rpl_row_001 : MDEV-16653 - MTR's internal check fails
+rpl.rpl_row_basic_11bugs : MDEV-12171 - Server failed to start
+rpl.rpl_row_basic_2myisam : MDEV-13875 - command "diff_files" failed
+rpl.rpl_row_corruption : MDEV-21569 - mutex: LOCK_global_system_variables unlocking
+rpl.rpl_row_drop_create_temp_table : MDEV-14487 - Wrong result
+rpl.rpl_row_end_of_statement_loss : MDEV-21237 - Server crash
+rpl.rpl_row_img_blobs : MDEV-13875 - command "diff_files" failed
+rpl.rpl_row_img_eng_min : MDEV-13875 - diff_files failed
+rpl.rpl_row_img_eng_noblob : MDEV-13875 - command "diff_files" failed
+rpl.rpl_row_index_choice : MDEV-15196 - Slave crash
+rpl.rpl_row_sp001 : MDEV-9329 - Fails on Ubuntu/s390x
+rpl.rpl_row_until : MDEV-14052 - Master will not send events with checksum
+rpl.rpl_semi_sync_event_after_sync : MDEV-11806 - warnings
+rpl.rpl_semi_sync_skip_repl : MDEV-23371 - Server crash
+rpl.rpl_semi_sync_uninstall_plugin : MDEV-24561 - Wrong usage of mutex; MDEV-7140 - Assorted failures
+rpl.rpl_semi_sync_wait_point : MDEV-11807 - timeout in wait condition
+rpl.rpl_show_slave_hosts : MDEV-10681 - Crash
+rpl.rpl_shutdown_wait_slaves : MDEV-22517 - Timeout on sync_with_master
+rpl.rpl_skip_replication : MDEV-23372 - Extra warning
+rpl.rpl_slave_load_tmpdir_not_exist : MDEV-23372 - Extra warning
+rpl.rpl_slow_query_log : MDEV-13250 - Test abort
+rpl.rpl_sp_effects : MDEV-13249 - Crash
+rpl.rpl_start_stop_slave : MDEV-13567 - Sync slave timeout
+rpl.rpl_stm_relay_ign_space : MDEV-14360 - Test assertion
+rpl.rpl_stm_stop_middle_group : MDEV-13791 - Server crash
+rpl.rpl_sync : MDEV-10633 - Database page corruption
+rpl.rpl_temporary_error2 : MDEV-10634 - Wrong number of retries
+rpl.rpl_test_framework : MDEV-19368 - mysqltest failed but provided no output
+rpl.rpl_trigger : MDEV-18055 - Wrong result
+rpl.rpl_truncate_3innodb : MDEV-19454 - Syntax error
+rpl.rpl_upgrade_master_info : MDEV-16567 - Assertion failure
+rpl.rpl_user_variables : MDEV-20522 - Wrong result
+rpl.rpl_variables : MDEV-20150 - Server crash
+rpl.sec_behind_master-5114 : MDEV-13878 - Wrong result
#-----------------------------------------------------------------------
@@ -826,19 +722,13 @@ spider.basic_sql : MDEV-11186 - Internal check fails
#-----------------------------------------------------------------------
-spider/bg.* : MDEV-24059 - Timeout
-spider/bg.ha : MDEV-9329 - failures on s390x
-spider/bg.ha_part : MDEV-9329 - Fails on Ubuntu/s390x
-spider/bg.spider3_fixes : MDEV-12639 - Syntax error
-spider/bg.spider_fixes : MDEV-9329 - failures on s390x
-spider/bg.vp_fixes : MDEV-9329 - Fails on Ubuntu/s390x
-
-#-----------------------------------------------------------------------
-
-spider/bugfix.direct_sql_with_comma_pwd : Added in 10.4.16
-spider/bugfix.mdev_20100 : Added in 10.4.16
-spider/bugfix.mdev_22246 : Added in 10.4.16
-spider/bugfix.xa_cmd : Added in 10.4.16
+spider/bg.* : MDEV-24059 - Timeout
+spider/bg.ha : MDEV-9329 - failures on s390x
+spider/bg.ha_part : MDEV-9329 - Fails on Ubuntu/s390x
+spider/bg.spider3_fixes : MDEV-12639 - Syntax error
+spider/bg.spider3_fixes_part : MDEV-24809 - Timeout
+spider/bg.spider_fixes : MDEV-9329 - failures on s390x
+spider/bg.vp_fixes : MDEV-9329 - Fails on Ubuntu/s390x
#-----------------------------------------------------------------------
@@ -848,7 +738,6 @@ spider/handler.* : MDEV-10987, MDEV-10990 - Tests have not been maintained
sql_sequence.concurrent_create : MDEV-16635 - Server crash
sql_sequence.kill : MDEV-23393 - Server crash
-sql_sequence.next : Modified in 10.4.16
sql_sequence.read_only : MDEV-22956 - Failing assertion
#-----------------------------------------------------------------------
@@ -865,16 +754,12 @@ sys_vars.autocommit_func2 : MDEV-9329 - Fails on Ubuntu
sys_vars.host_cache_size_auto : MDEV-20112 - Wrong result
sys_vars.innodb_buffer_pool_dump_at_shutdown_basic : MDEV-14280 - Unexpected error
sys_vars.innodb_checksum_algorithm_basic : MDEV-21568 - Errno: 2000
+sys_vars.innodb_flush_method_func : MDEV-24810 - Server failed to restart
sys_vars.keep_files_on_create_basic : MDEV-10676 - timeout
sys_vars.log_slow_admin_statements_func : MDEV-12235 - Server crash
-sys_vars.replicate_do_db_basic : Modified in 10.4.16
-sys_vars.rpl_init_slave_func : Modified in 10.4.16
-sys_vars.session_track_system_variables_basic : Modified in 10.4.16
sys_vars.slow_query_log_func : MDEV-14273 - Wrong result
sys_vars.thread_cache_size_func : MDEV-11775 - Wrong result
sys_vars.wait_timeout_func : MDEV-12896 - Wrong result
-sys_vars.wsrep_cluster_address_basic : Modified in 10.4.16
-sys_vars.wsrep_on_basic : Configuration deleted in 10.4.16
#-----------------------------------------------------------------------
@@ -906,7 +791,7 @@ tokudb_alter_table.hcad_all_add2 : MDEV-15269 - Timeout
#-----------------------------------------------------------------------
-tokudb_backup.* : MDEV-11001 - Missing include file
+tokudb_backup.* : MDEV-11001 - Missing include file (Won't fix)
#-----------------------------------------------------------------------
@@ -925,11 +810,11 @@ tokudb_parts.partition_alter4_tokudb : MDEV-12640 - Lost connection
#-----------------------------------------------------------------------
-tokudb_rpl.* : MDEV-11001 - Missing include file
+tokudb_rpl.* : MDEV-11001 - Missing include file (Won't fix)
#-----------------------------------------------------------------------
-tokudb_sys_vars.* : MDEV-11001 - Missing include file
+tokudb_sys_vars.* : MDEV-11001 - Missing include file (Won't fix)
#-----------------------------------------------------------------------
@@ -947,28 +832,17 @@ unit.mf_iocache : MDEV-20952 - ASAN stack-buffer-overflow
vcol.not_supported : MDEV-10639 - Testcase timeout
vcol.vcol_keys_innodb : MDEV-10639 - Testcase timeout
-vcol.vcol_misc : MDEV-16651 - Wrong error message; modified in 10.4.16
+vcol.vcol_misc : MDEV-16651 - Wrong error message
#-----------------------------------------------------------------------
-versioning.create : Modified in 10.4.16
-versioning.select : Modified in 10.4.16
-versioning.sysvars : Modified in 10.4.16
-versioning.update : MDEV-22475 - Wrong result code
-versioning.view : Modified in 10.4.16
+versioning.update : MDEV-22475 - Wrong result code
#-----------------------------------------------------------------------
-wsrep.MDEV-22443 : Added in 10.4.16
-wsrep.MDEV-23081 : Added in 10.4.16
-wsrep.MDEV-23092 : Added in 10.4.16
-wsrep.MDEV-23466 : Added in 10.4.16
wsrep.foreign_key : MDEV-14725 - WSREP has not yet prepared node
-wsrep.mdev_22681 : Added in 10.4.16
wsrep.mdev_6832 : MDEV-14195 - Check testcase failed
wsrep.pool_of_threads : MDEV-17345 - WSREP has not yet prepared node for application use
-wsrep.variables : Modified in 10.4.16
-wsrep.variables_debug : Added in 10.4.16
#-----------------------------------------------------------------------
diff --git a/mysys/file_logger.c b/mysys/file_logger.c
index 71394be7afc..476ed44089e 100644
--- a/mysys/file_logger.c
+++ b/mysys/file_logger.c
@@ -150,23 +150,34 @@ exit:
}
+/*
+ Return 1 if we should rotate the log
+*/
+
+my_bool logger_time_to_rotate(LOGGER_HANDLE *log)
+{
+ my_off_t filesize;
+ if (log->rotations > 0 &&
+ (filesize= my_tell(log->file, MYF(0))) != (my_off_t) -1 &&
+ ((ulonglong) filesize >= log->size_limit))
+ return 1;
+ return 0;
+}
+
+
int logger_vprintf(LOGGER_HANDLE *log, const char* fmt, va_list ap)
{
int result;
- my_off_t filesize;
char cvtbuf[1024];
size_t n_bytes;
flogger_mutex_lock(&log->lock);
- if (log->rotations > 0)
- if ((filesize= my_tell(log->file, MYF(0))) == (my_off_t) -1 ||
- ((unsigned long long)filesize >= log->size_limit &&
- do_rotate(log)))
- {
- result= -1;
- errno= my_errno;
- goto exit; /* Log rotation needed but failed */
- }
+ if (logger_time_to_rotate(log) && do_rotate(log))
+ {
+ result= -1;
+ errno= my_errno;
+ goto exit; /* Log rotation needed but failed */
+ }
n_bytes= my_vsnprintf(cvtbuf, sizeof(cvtbuf), fmt, ap);
if (n_bytes >= sizeof(cvtbuf))
@@ -180,21 +191,18 @@ exit:
}
-int logger_write(LOGGER_HANDLE *log, const char *buffer, size_t size)
+static int logger_write_r(LOGGER_HANDLE *log, my_bool allow_rotations,
+ const char *buffer, size_t size)
{
int result;
- my_off_t filesize;
flogger_mutex_lock(&log->lock);
- if (log->rotations > 0)
- if ((filesize= my_tell(log->file, MYF(0))) == (my_off_t) -1 ||
- ((unsigned long long)filesize >= log->size_limit &&
- do_rotate(log)))
- {
- result= -1;
- errno= my_errno;
- goto exit; /* Log rotation needed but failed */
- }
+ if (allow_rotations && logger_time_to_rotate(log) && do_rotate(log))
+ {
+ result= -1;
+ errno= my_errno;
+ goto exit; /* Log rotation needed but failed */
+ }
result= (int)my_write(log->file, (uchar *) buffer, size, MYF(0));
@@ -204,6 +212,11 @@ exit:
}
+int logger_write(LOGGER_HANDLE *log, const char *buffer, size_t size)
+{
+ return logger_write_r(log, TRUE, buffer, size);
+}
+
int logger_rotate(LOGGER_HANDLE *log)
{
int result;
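The file_logger.c change above factors the size check out of logger_vprintf() and logger_write() into logger_time_to_rotate(), and splits the write path into logger_write_r() so a caller can suppress rotation. A minimal standalone sketch of the same pattern, using hypothetical names (toy_logger, toy_time_to_rotate, toy_write) and a plain FILE* rather than the MariaDB types:

#include <stdio.h>
#include <stddef.h>

/* Illustrative stand-in for LOGGER_HANDLE (hypothetical, not the MariaDB type). */
typedef struct {
  FILE *file;
  unsigned rotations;             /* 0 disables rotation                    */
  unsigned long long size_limit;  /* rotate once the file reaches this size */
} toy_logger;

/* Same role as the new logger_time_to_rotate(): report whether rotation is due. */
int toy_time_to_rotate(toy_logger *log)
{
  long pos;
  return log->rotations > 0 &&
         (pos= ftell(log->file)) != -1L &&
         (unsigned long long) pos >= log->size_limit;
}

/* Same role as logger_write_r(): the caller decides whether rotation is allowed. */
int toy_write(toy_logger *log, int allow_rotation, const char *buf, size_t len)
{
  if (allow_rotation && toy_time_to_rotate(log))
    rewind(log->file);            /* placeholder for the real do_rotate()   */
  return fwrite(buf, 1, len, log->file) == len ? 0 : -1;
}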
diff --git a/mysys/lf_hash.c b/mysys/lf_hash.c
index e626e88b6fd..7e401a63194 100644
--- a/mysys/lf_hash.c
+++ b/mysys/lf_hash.c
@@ -32,10 +32,10 @@
/* An element of the list */
typedef struct {
- intptr volatile link; /* a pointer to the next element in a list and a flag */
- uint32 hashnr; /* reversed hash number, for sorting */
+ intptr link; /* a pointer to the next element in a list and a flag */
const uchar *key;
size_t keylen;
+ uint32 hashnr; /* reversed hash number, for sorting */
/*
data is stored here, directly after the keylen.
thus the pointer to data is (void*)(slist_element_ptr+1)
@@ -49,7 +49,7 @@ const int LF_HASH_OVERHEAD= sizeof(LF_SLIST);
in a list) from l_find to l_insert/l_delete
*/
typedef struct {
- intptr volatile *prev;
+ intptr *prev;
LF_SLIST *curr, *next;
} CURSOR;
@@ -86,7 +86,7 @@ typedef struct {
0 - ok
1 - error (callback returned 1)
*/
-static int l_find(LF_SLIST * volatile *head, CHARSET_INFO *cs, uint32 hashnr,
+static int l_find(LF_SLIST **head, CHARSET_INFO *cs, uint32 hashnr,
const uchar *key, size_t keylen, CURSOR *cursor, LF_PINS *pins,
my_hash_walk_action callback)
{
@@ -169,7 +169,7 @@ retry:
it uses pins[0..2], on return all pins are removed.
if there're nodes with the same key value, a new node is added before them.
*/
-static LF_SLIST *l_insert(LF_SLIST * volatile *head, CHARSET_INFO *cs,
+static LF_SLIST *l_insert(LF_SLIST **head, CHARSET_INFO *cs,
LF_SLIST *node, LF_PINS *pins, uint flags)
{
CURSOR cursor;
@@ -221,7 +221,7 @@ static LF_SLIST *l_insert(LF_SLIST * volatile *head, CHARSET_INFO *cs,
NOTE
it uses pins[0..2], on return all pins are removed.
*/
-static int l_delete(LF_SLIST * volatile *head, CHARSET_INFO *cs, uint32 hashnr,
+static int l_delete(LF_SLIST **head, CHARSET_INFO *cs, uint32 hashnr,
const uchar *key, uint keylen, LF_PINS *pins)
{
CURSOR cursor;
@@ -279,7 +279,7 @@ static int l_delete(LF_SLIST * volatile *head, CHARSET_INFO *cs, uint32 hashnr,
it uses pins[0..2], on return the pin[2] keeps the node found
all other pins are removed.
*/
-static LF_SLIST *l_search(LF_SLIST * volatile *head, CHARSET_INFO *cs,
+static LF_SLIST *l_search(LF_SLIST **head, CHARSET_INFO *cs,
uint32 hashnr, const uchar *key, uint keylen,
LF_PINS *pins)
{
@@ -320,7 +320,7 @@ static inline my_hash_value_type calc_hash(CHARSET_INFO *cs,
#define MAX_LOAD 1.0 /* average number of elements in a bucket */
-static int initialize_bucket(LF_HASH *, LF_SLIST * volatile*, uint, LF_PINS *);
+static int initialize_bucket(LF_HASH *, LF_SLIST **, uint, LF_PINS *);
static void default_initializer(LF_HASH *hash, void *dst, const void *src)
{
@@ -399,7 +399,7 @@ void lf_hash_destroy(LF_HASH *hash)
int lf_hash_insert(LF_HASH *hash, LF_PINS *pins, const void *data)
{
int csize, bucket, hashnr;
- LF_SLIST *node, * volatile *el;
+ LF_SLIST *node, **el;
node= (LF_SLIST *)lf_alloc_new(pins);
if (unlikely(!node))
@@ -438,7 +438,7 @@ int lf_hash_insert(LF_HASH *hash, LF_PINS *pins, const void *data)
*/
int lf_hash_delete(LF_HASH *hash, LF_PINS *pins, const void *key, uint keylen)
{
- LF_SLIST * volatile *el;
+ LF_SLIST **el;
uint bucket, hashnr;
hashnr= hash->hash_function(hash->charset, (uchar *)key, keylen) & INT_MAX32;
@@ -474,7 +474,7 @@ void *lf_hash_search_using_hash_value(LF_HASH *hash, LF_PINS *pins,
my_hash_value_type hashnr,
const void *key, uint keylen)
{
- LF_SLIST * volatile *el, *found;
+ LF_SLIST **el, *found;
uint bucket;
/* hide OOM errors - if we cannot initialize a bucket, try the previous one */
@@ -508,7 +508,7 @@ int lf_hash_iterate(LF_HASH *hash, LF_PINS *pins,
CURSOR cursor;
uint bucket= 0;
int res;
- LF_SLIST * volatile *el;
+ LF_SLIST **el;
el= lf_dynarray_lvalue(&hash->array, bucket);
if (unlikely(!el))
@@ -540,13 +540,13 @@ static const uchar *dummy_key= (uchar*)"";
0 - ok
-1 - out of memory
*/
-static int initialize_bucket(LF_HASH *hash, LF_SLIST * volatile *node,
+static int initialize_bucket(LF_HASH *hash, LF_SLIST **node,
uint bucket, LF_PINS *pins)
{
uint parent= my_clear_highest_bit(bucket);
LF_SLIST *dummy= (LF_SLIST *)my_malloc(sizeof(LF_SLIST), MYF(MY_WME));
LF_SLIST **tmp= 0, *cur;
- LF_SLIST * volatile *el= lf_dynarray_lvalue(&hash->array, parent);
+ LF_SLIST **el= lf_dynarray_lvalue(&hash->array, parent);
if (unlikely(!el || !dummy))
return -1;
if (*el == NULL && bucket &&
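The lf_hash.c hunks drop the volatile qualifiers from the lock-free list pointers; the cross-thread visibility and ordering come from the atomic primitives the list is manipulated with, not from volatile. A rough illustration of that idea using C11 <stdatomic.h> (the MariaDB code uses its own my_atomic wrappers, so this is only an analogy, and toy_node/toy_push are made-up names):

#include <stdatomic.h>

typedef struct toy_node {
  _Atomic(struct toy_node *) next;
  int key;
} toy_node;

/* Lock-free push: the acquire/release atomics provide the cross-thread
   ordering, so the pointer fields do not need to be declared volatile. */
void toy_push(_Atomic(toy_node *) *head, toy_node *node)
{
  toy_node *old= atomic_load_explicit(head, memory_order_relaxed);
  do
    atomic_store_explicit(&node->next, old, memory_order_relaxed);
  while (!atomic_compare_exchange_weak_explicit(head, &old, node,
                                                memory_order_release,
                                                memory_order_relaxed));
}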
diff --git a/mysys/ma_dyncol.c b/mysys/ma_dyncol.c
index 0085c375aa1..b438d910157 100644
--- a/mysys/ma_dyncol.c
+++ b/mysys/ma_dyncol.c
@@ -865,7 +865,7 @@ dynamic_column_uint_read(DYNAMIC_COLUMN_VALUE *store_it_here,
static size_t dynamic_column_sint_bytes(longlong val)
{
- return dynamic_column_uint_bytes((val << 1) ^
+ return dynamic_column_uint_bytes((((ulonglong) val) << 1) ^
(val < 0 ? 0xffffffffffffffffull : 0));
}
@@ -883,8 +883,8 @@ static enum enum_dyncol_func_result
dynamic_column_sint_store(DYNAMIC_COLUMN *str, longlong val)
{
return dynamic_column_uint_store(str,
- (val << 1) ^
- (val < 0 ? 0xffffffffffffffffULL : 0));
+ (((ulonglong) val) << 1) ^
+ (val < 0 ? 0xffffffffffffffffULL : 0));
}
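The ma_dyncol.c change casts the signed value to ulonglong before the left shift: shifting a negative signed integer left is undefined behaviour in C, while the unsigned shift is well defined and still yields the intended zig-zag mapping. A small self-contained example of that encoding (the encode/decode helpers below are illustrative, not part of the patch):

#include <stdio.h>

typedef long long longlong;
typedef unsigned long long ulonglong;

/* Zig-zag mapping used by ma_dyncol.c: 0->0, -1->1, 1->2, -2->3, ...
   Casting to ulonglong before the shift keeps the operation well defined
   even for negative values. */
ulonglong zigzag_encode(longlong val)
{
  return (((ulonglong) val) << 1) ^ (val < 0 ? 0xffffffffffffffffULL : 0);
}

longlong zigzag_decode(ulonglong u)
{
  return (longlong) (u >> 1) ^ -(longlong) (u & 1);
}

int main(void)
{
  longlong samples[]= { 0, -1, 1, -2, 123456789LL, -123456789LL };
  for (unsigned i= 0; i < sizeof(samples) / sizeof(samples[0]); i++)
  {
    ulonglong enc= zigzag_encode(samples[i]);
    printf("%lld -> %llu -> %lld\n", samples[i], enc, zigzag_decode(enc));
  }
  return 0;
}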
diff --git a/mysys/mf_iocache.c b/mysys/mf_iocache.c
index 931df6ce220..559ddbec1ee 100644
--- a/mysys/mf_iocache.c
+++ b/mysys/mf_iocache.c
@@ -1612,7 +1612,7 @@ int _my_b_async_read(IO_CACHE *info, uchar *Buffer, size_t Count)
Buffer+=length;
Count-=length;
left_length+=length;
- info->read_end=info->rc_pos+read_length;
+ info->read_end=info->read_pos+read_length;
info->read_pos+=length;
}
else
diff --git a/mysys/my_addr_resolve.c b/mysys/my_addr_resolve.c
index 3a49c749010..f0c0d214171 100644
--- a/mysys/my_addr_resolve.c
+++ b/mysys/my_addr_resolve.c
@@ -170,7 +170,7 @@ static pid_t pid;
static char addr2line_binary[1024];
static char output[1024];
static struct pollfd poll_fds;
-Dl_info info;
+static Dl_info info;
int start_addr2line_fork(const char *binary_path)
{
diff --git a/mysys/my_seek.c b/mysys/my_seek.c
index 6a370b0ad43..db364ccddda 100644
--- a/mysys/my_seek.c
+++ b/mysys/my_seek.c
@@ -86,7 +86,7 @@ my_off_t my_tell(File fd, myf MyFlags)
DBUG_ENTER("my_tell");
DBUG_PRINT("my",("fd: %d MyFlags: %lu",fd, MyFlags));
DBUG_ASSERT(fd >= 0);
-#if defined (HAVE_TELL) && !defined (_WIN32)
+#if defined (HAVE_TELL) && !defined (_WIN32) && !defined(_AIX)
pos= tell(fd);
#else
pos= my_seek(fd, 0L, MY_SEEK_CUR,0);
diff --git a/plugin/auth_ed25519/CMakeLists.txt b/plugin/auth_ed25519/CMakeLists.txt
index 1a3d5cc4bce..1033dc053c8 100644
--- a/plugin/auth_ed25519/CMakeLists.txt
+++ b/plugin/auth_ed25519/CMakeLists.txt
@@ -19,6 +19,10 @@ IF(MSVC)
SET_SOURCE_FILES_PROPERTIES(${REF10_SOURCES} PROPERTY COMPILE_FLAGS "/wd4244 /wd4146")
ENDIF()
+IF(CMAKE_C_COMPILER_ID MATCHES "GNU" AND CMAKE_C_COMPILER_VERSION LESS 11 AND CMAKE_C_COMPILER_VERSION GREATER 6)
+ SET_SOURCE_FILES_PROPERTIES(${REF10_SOURCES} PROPERTY COMPILE_FLAGS -fno-sanitize=shift)
+ENDIF()
+
# server plugin *cannot* link with the library, it needs all sources to be
# compiled with MYSQL_DYNAMIC_PLUGIN
MYSQL_ADD_PLUGIN(auth_ed25519 server_ed25519.c ${REF10_SOURCES} MODULE_ONLY)
diff --git a/plugin/auth_gssapi/CMakeLists.txt b/plugin/auth_gssapi/CMakeLists.txt
index 4d3718dd471..3205a58ebbe 100644
--- a/plugin/auth_gssapi/CMakeLists.txt
+++ b/plugin/auth_gssapi/CMakeLists.txt
@@ -18,7 +18,7 @@ ELSE()
SET(GSSAPI_SERVER gssapi_server.cc)
SET(GSSAPI_ERRMSG gssapi_errmsg.cc)
- IF(APPLE)
+ IF(CMAKE_CXX_COMPILER_ID MATCHES "Clang")
SET_SOURCE_FILES_PROPERTIES(
${GSSAPI_CLIENT} ${GSSAPI_SERVER} ${GSSAPI_ERRMSG}
PROPERTY COMPILE_FLAGS "-Wno-deprecated-declarations")
diff --git a/plugin/auth_pam/CMakeLists.txt b/plugin/auth_pam/CMakeLists.txt
index f7d8e019751..8d11d174f90 100644
--- a/plugin/auth_pam/CMakeLists.txt
+++ b/plugin/auth_pam/CMakeLists.txt
@@ -4,10 +4,11 @@ INCLUDE (CheckFunctionExists)
CHECK_INCLUDE_FILES (security/pam_ext.h HAVE_PAM_EXT_H)
CHECK_INCLUDE_FILES (security/pam_appl.h HAVE_PAM_APPL_H)
CHECK_FUNCTION_EXISTS (strndup HAVE_STRNDUP)
+CHECK_FUNCTION_EXISTS (getgrouplist HAVE_GETGROUPLIST)
INCLUDE_DIRECTORIES(${CMAKE_CURRENT_BINARY_DIR})
-# Check whether getgrouplist uses git_t for second and third arguments.
+# Check whether getgrouplist uses gid_t for second and third arguments.
SET(CMAKE_REQUIRED_FLAGS -Werror)
CHECK_C_SOURCE_COMPILES(
"
@@ -29,7 +30,7 @@ SET(CMAKE_REQUIRED_LIBRARIES pam)
CHECK_FUNCTION_EXISTS(pam_syslog HAVE_PAM_SYSLOG)
SET(CMAKE_REQUIRED_LIBRARIES)
-IF(HAVE_PAM_APPL_H)
+IF(HAVE_PAM_APPL_H AND HAVE_GETGROUPLIST)
FIND_LIBRARY(PAM_LIBRARY pam) # for srpm build-depends detection
ADD_DEFINITIONS(-D_GNU_SOURCE)
MYSQL_ADD_PLUGIN(auth_pam_v1 auth_pam_v1.c LINK_LIBRARIES pam MODULE_ONLY)
@@ -54,7 +55,7 @@ IF(HAVE_PAM_APPL_H)
SET(CPACK_RPM_server_USER_FILELIST ${CPACK_RPM_server_USER_FILELIST} "%config(noreplace) ${INSTALL_PAMDATADIR}/*" PARENT_SCOPE)
ENDIF()
ENDIF()
-ENDIF(HAVE_PAM_APPL_H)
+ENDIF()
CONFIGURE_FILE(${CMAKE_CURRENT_SOURCE_DIR}/config.h.cmake
${CMAKE_CURRENT_BINARY_DIR}/config_auth_pam.h)
diff --git a/plugin/auth_pam/testing/CMakeLists.txt b/plugin/auth_pam/testing/CMakeLists.txt
index c8d2e3cbb97..151823b9419 100644
--- a/plugin/auth_pam/testing/CMakeLists.txt
+++ b/plugin/auth_pam/testing/CMakeLists.txt
@@ -4,7 +4,7 @@ ADD_LIBRARY(pam_mariadb_mtr MODULE pam_mariadb_mtr.c)
SET_TARGET_PROPERTIES (pam_mariadb_mtr PROPERTIES PREFIX "")
TARGET_LINK_LIBRARIES(pam_mariadb_mtr pam)
-IF(APPLE)
+IF(CMAKE_C_COMPILER_ID MATCHES "Clang")
SET_SOURCE_FILES_PROPERTIES(
pam_mariadb_mtr.c
PROPERTY COMPILE_FLAGS "-Wno-incompatible-pointer-types-discards-qualifiers")
diff --git a/plugin/feedback/sender_thread.cc b/plugin/feedback/sender_thread.cc
index 171440c41ad..3976c950541 100644
--- a/plugin/feedback/sender_thread.cc
+++ b/plugin/feedback/sender_thread.cc
@@ -47,7 +47,7 @@ static int table_to_string(TABLE *table, String *result)
res= table->file->ha_rnd_init(1);
- dbug_tmp_use_all_columns(table, table->read_set);
+ dbug_tmp_use_all_columns(table, &table->read_set);
while(!res && !table->file->ha_rnd_next(table->record[0]))
{
@@ -115,6 +115,7 @@ static int prepare_for_fill(TABLE_LIST *tables)
tables->init_one_table(&INFORMATION_SCHEMA_NAME, &tbl_name, 0, TL_READ);
tables->schema_table= i_s_feedback;
+ tables->schema_table_reformed= 1;
tables->select_lex= thd->lex->first_select_lex();
DBUG_ASSERT(tables->select_lex);
tables->table= create_schema_table(thd, tables);
diff --git a/plugin/server_audit/server_audit.c b/plugin/server_audit/server_audit.c
index 64fc319aa22..228f261b333 100644
--- a/plugin/server_audit/server_audit.c
+++ b/plugin/server_audit/server_audit.c
@@ -16,7 +16,7 @@
#define PLUGIN_VERSION 0x104
-#define PLUGIN_STR_VERSION "1.4.10"
+#define PLUGIN_STR_VERSION "1.4.13"
#define _my_thread_var loc_thread_var
@@ -140,6 +140,7 @@ static int loc_file_errno;
#define logger_write loc_logger_write
#define logger_rotate loc_logger_rotate
#define logger_init_mutexts loc_logger_init_mutexts
+#define logger_time_to_rotate loc_logger_time_to_rotate
static size_t loc_write(File Filedes, const uchar *Buffer, size_t Count)
@@ -554,22 +555,22 @@ static struct st_mysql_show_var audit_status[]=
{0,0,0}
};
-#if defined(HAVE_PSI_INTERFACE) && !defined(FLOGGER_NO_PSI)
-/* These belong to the service initialization */
+#ifdef HAVE_PSI_INTERFACE
static PSI_mutex_key key_LOCK_operations;
-static PSI_mutex_key key_LOCK_atomic;
-static PSI_mutex_key key_LOCK_bigbuffer;
static PSI_mutex_info mutex_key_list[]=
{
{ &key_LOCK_operations, "SERVER_AUDIT_plugin::lock_operations",
- PSI_FLAG_GLOBAL},
+ PSI_FLAG_GLOBAL}
+#ifndef FLOGGER_NO_PSI
+ ,
{ &key_LOCK_atomic, "SERVER_AUDIT_plugin::lock_atomic",
PSI_FLAG_GLOBAL},
{ &key_LOCK_bigbuffer, "SERVER_AUDIT_plugin::lock_bigbuffer",
PSI_FLAG_GLOBAL}
+#endif /*FLOGGER_NO_PSI*/
};
-#endif
-static mysql_mutex_t lock_operations;
+#endif /*HAVE_PSI_INTERFACE*/
+static mysql_prlock_t lock_operations;
static mysql_mutex_t lock_atomic;
static mysql_mutex_t lock_bigbuffer;
@@ -819,6 +820,7 @@ enum sa_keywords
SQLCOM_DML,
SQLCOM_GRANT,
SQLCOM_CREATE_USER,
+ SQLCOM_ALTER_USER,
SQLCOM_CHANGE_MASTER,
SQLCOM_CREATE_SERVER,
SQLCOM_SET_OPTION,
@@ -857,12 +859,8 @@ struct sa_keyword keywords_to_skip[]=
struct sa_keyword not_ddl_keywords[]=
{
- {4, "DROP", &function_word, SQLCOM_QUERY_ADMIN},
- {4, "DROP", &procedure_word, SQLCOM_QUERY_ADMIN},
{4, "DROP", &user_word, SQLCOM_DCL},
{6, "CREATE", &user_word, SQLCOM_DCL},
- {6, "CREATE", &function_word, SQLCOM_QUERY_ADMIN},
- {6, "CREATE", &procedure_word, SQLCOM_QUERY_ADMIN},
{6, "RENAME", &user_word, SQLCOM_DCL},
{0, NULL, 0, SQLCOM_DDL}
};
@@ -926,6 +924,7 @@ struct sa_keyword passwd_keywords[]=
{
{3, "SET", &password_word, SQLCOM_SET_OPTION},
{5, "ALTER", &server_word, SQLCOM_ALTER_SERVER},
+ {5, "ALTER", &user_word, SQLCOM_ALTER_USER},
{5, "GRANT", 0, SQLCOM_GRANT},
{6, "CREATE", &user_word, SQLCOM_CREATE_USER},
{6, "CREATE", &server_word, SQLCOM_CREATE_SERVER},
@@ -1320,19 +1319,41 @@ static void change_connection(struct connection_info *cn,
event->ip, event->ip_length);
}
+/*
+ Write to the log
+
+ @param take_lock If set, take a read lock (or write lock on rotate).
+ If not set, the caller has a already taken a write lock
+*/
+
static int write_log(const char *message, size_t len, int take_lock)
{
int result= 0;
if (take_lock)
- flogger_mutex_lock(&lock_operations);
+ {
+ /* Start by taking a read lock */
+ mysql_prlock_rdlock(&lock_operations);
+ }
if (output_type == OUTPUT_FILE)
{
- if (logfile &&
- (is_active= (logger_write(logfile, message, len) == (int) len)))
- goto exit;
- ++log_write_failures;
- result= 1;
+ if (logfile)
+ {
+ my_bool allow_rotate= !take_lock; /* Allow rotation if the caller holds the write lock */
+ if (take_lock && logger_time_to_rotate(logfile))
+ {
+ /* We have to rotate the log; upgrade the read lock above to a write lock */
+ mysql_prlock_unlock(&lock_operations);
+ mysql_prlock_wrlock(&lock_operations);
+ allow_rotate= 1;
+ }
+ if (!(is_active= (logger_write_r(logfile, allow_rotate, message, len) ==
+ (int) len)))
+ {
+ ++log_write_failures;
+ result= 1;
+ }
+ }
}
else if (output_type == OUTPUT_SYSLOG)
{
@@ -1340,9 +1361,8 @@ static int write_log(const char *message, size_t len, int take_lock)
syslog_priority_codes[syslog_priority],
"%s %.*s", syslog_info, (int) len, message);
}
-exit:
if (take_lock)
- flogger_mutex_unlock(&lock_operations);
+ mysql_prlock_unlock(&lock_operations);
return result;
}
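The rewritten write_log() above takes a read lock on lock_operations in the common case and only switches to a write lock when logger_time_to_rotate() says a rotation is due; because the switch is done by unlocking and relocking, logger_write_r() re-checks the rotation condition once the write lock is held. A rough sketch of the same pattern using POSIX pthread_rwlock instead of mysql_prlock (write_entry and rotation_due are made-up names for illustration):

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t lock= PTHREAD_RWLOCK_INITIALIZER;
static FILE *logfile;                       /* opened elsewhere              */
static const long size_limit= 1L << 20;

static int rotation_due(void)
{
  long pos= ftell(logfile);
  return pos != -1L && pos >= size_limit;
}

/* Same shape as the new write_log(): start under a read lock so concurrent
   writers do not serialize on the plugin lock, and take the write lock only
   when a rotation is actually needed. */
int write_entry(const char *msg, size_t len)
{
  int rotate= 0;
  pthread_rwlock_rdlock(&lock);
  if (rotation_due())
  {
    /* Dropping the read lock and taking the write lock is not atomic, so the
       condition is re-checked under the write lock, just as logger_write_r()
       re-checks it via logger_time_to_rotate(). */
    pthread_rwlock_unlock(&lock);
    pthread_rwlock_wrlock(&lock);
    rotate= rotation_due();
  }
  if (rotate)
    rewind(logfile);                        /* placeholder for do_rotate()   */
  fwrite(msg, 1, len, logfile);             /* the logger's own mutex still  */
  pthread_rwlock_unlock(&lock);             /* protects the actual write     */
  return 0;
}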
@@ -1581,27 +1601,32 @@ no_password:
-static int do_log_user(const char *name, int take_lock)
+static int do_log_user(const char *name, int len,
+ const char *proxy, int proxy_len, int take_lock)
{
- size_t len;
int result;
if (!name)
return 0;
- len= strlen(name);
if (take_lock)
- flogger_mutex_lock(&lock_operations);
+ mysql_prlock_rdlock(&lock_operations);
if (incl_user_coll.n_users)
- result= coll_search(&incl_user_coll, name, len) != 0;
+ {
+ result= coll_search(&incl_user_coll, name, len) != 0 ||
+ (proxy && coll_search(&incl_user_coll, proxy, proxy_len) != 0);
+ }
else if (excl_user_coll.n_users)
- result= coll_search(&excl_user_coll, name, len) == 0;
+ {
+ result= coll_search(&excl_user_coll, name, len) == 0 &&
+ (proxy && coll_search(&excl_user_coll, proxy, proxy_len) == 0);
+ }
else
result= 1;
if (take_lock)
- flogger_mutex_unlock(&lock_operations);
+ mysql_prlock_unlock(&lock_operations);
return result;
}
@@ -1819,6 +1844,7 @@ do_log_query:
{
case SQLCOM_GRANT:
case SQLCOM_CREATE_USER:
+ case SQLCOM_ALTER_USER:
csize+= escape_string_hide_passwords(query, query_len,
uh_buffer, uh_buffer_size,
"IDENTIFIED", 10, "BY", 2, 0);
@@ -2070,13 +2096,9 @@ static void update_connection_info(struct connection_info *cn,
{
case MYSQL_AUDIT_CONNECTION_CONNECT:
setup_connection_connect(cn, event);
- if (event->status == 0 && event->proxy_user && event->proxy_user[0])
- log_proxy(cn, event);
break;
case MYSQL_AUDIT_CONNECTION_CHANGE_USER:
*after_action= AA_CHANGE_USER;
- if (event->proxy_user && event->proxy_user[0])
- log_proxy(cn, event);
break;
default:;
}
@@ -2141,7 +2163,9 @@ void auditing(MYSQL_THD thd, unsigned int event_class, const void *ev)
}
if (event_class == MYSQL_AUDIT_GENERAL_CLASS && FILTER(EVENT_QUERY) &&
- cn && (cn->log_always || do_log_user(cn->user, 1)))
+ cn && (cn->log_always || do_log_user(cn->user, cn->user_length,
+ cn->proxy, cn->proxy_length,
+ 1)))
{
const struct mysql_event_general *event =
(const struct mysql_event_general *) ev;
@@ -2161,7 +2185,8 @@ void auditing(MYSQL_THD thd, unsigned int event_class, const void *ev)
{
const struct mysql_event_table *event =
(const struct mysql_event_table *) ev;
- if (do_log_user(event->user, 1))
+ if (do_log_user(event->user, (int) SAFE_STRLEN(event->user),
+ cn->proxy, cn->proxy_length, 1))
{
switch (event->event_subclass)
{
@@ -2194,6 +2219,8 @@ void auditing(MYSQL_THD thd, unsigned int event_class, const void *ev)
{
case MYSQL_AUDIT_CONNECTION_CONNECT:
log_connection(cn, event, event->status ? "FAILED_CONNECT": "CONNECT");
+ if (event->status == 0 && event->proxy_user && event->proxy_user[0])
+ log_proxy(cn, event);
break;
case MYSQL_AUDIT_CONNECTION_DISCONNECT:
if (use_event_data_for_disconnect)
@@ -2203,6 +2230,8 @@ void auditing(MYSQL_THD thd, unsigned int event_class, const void *ev)
break;
case MYSQL_AUDIT_CONNECTION_CHANGE_USER:
log_connection(cn, event, "CHANGEUSER");
+ if (event->proxy_user && event->proxy_user[0])
+ log_proxy(cn, event);
break;
default:;
}
@@ -2489,11 +2518,11 @@ static int server_audit_init(void *p __attribute__((unused)))
servhost_len= (uint)strlen(servhost);
logger_init_mutexes();
-#if defined(HAVE_PSI_INTERFACE) && !defined(FLOGGER_NO_PSI)
+#ifdef HAVE_PSI_INTERFACE
if (PSI_server)
PSI_server->register_mutex("server_audit", mutex_key_list, 1);
#endif
- flogger_mutex_init(key_LOCK_operations, &lock_operations, MY_MUTEX_INIT_FAST);
+ mysql_prlock_init(key_LOCK_operations, &lock_operations);
flogger_mutex_init(key_LOCK_operations, &lock_atomic, MY_MUTEX_INIT_FAST);
flogger_mutex_init(key_LOCK_operations, &lock_bigbuffer, MY_MUTEX_INIT_FAST);
@@ -2581,7 +2610,7 @@ static int server_audit_deinit(void *p __attribute__((unused)))
closelog();
(void) free(big_buffer);
- flogger_mutex_destroy(&lock_operations);
+ mysql_prlock_destroy(&lock_operations);
flogger_mutex_destroy(&lock_atomic);
flogger_mutex_destroy(&lock_bigbuffer);
@@ -2692,7 +2721,7 @@ static void update_file_path(MYSQL_THD thd,
fprintf(stderr, "Log file name was changed to '%s'.\n", new_name);
if (!maria_55_started || !debug_server_started)
- flogger_mutex_lock(&lock_operations);
+ mysql_prlock_wrlock(&lock_operations);
if (logging)
log_current_query(thd);
@@ -2724,7 +2753,7 @@ static void update_file_path(MYSQL_THD thd,
file_path= path_buffer;
exit_func:
if (!maria_55_started || !debug_server_started)
- flogger_mutex_unlock(&lock_operations);
+ mysql_prlock_unlock(&lock_operations);
ADD_ATOMIC(internal_stop_logging, -1);
}
@@ -2740,9 +2769,9 @@ static void update_file_rotations(MYSQL_THD thd __attribute__((unused)),
if (!logging || output_type != OUTPUT_FILE)
return;
- flogger_mutex_lock(&lock_operations);
+ mysql_prlock_wrlock(&lock_operations);
logfile->rotations= rotations;
- flogger_mutex_unlock(&lock_operations);
+ mysql_prlock_unlock(&lock_operations);
}
@@ -2758,9 +2787,9 @@ static void update_file_rotate_size(MYSQL_THD thd __attribute__((unused)),
if (!logging || output_type != OUTPUT_FILE)
return;
- flogger_mutex_lock(&lock_operations);
+ mysql_prlock_wrlock(&lock_operations);
logfile->size_limit= file_rotate_size;
- flogger_mutex_unlock(&lock_operations);
+ mysql_prlock_unlock(&lock_operations);
}
@@ -2805,7 +2834,7 @@ static void update_incl_users(MYSQL_THD thd,
char *new_users= (*(char **) save) ? *(char **) save : empty_str;
size_t new_len= strlen(new_users) + 1;
if (!maria_55_started || !debug_server_started)
- flogger_mutex_lock(&lock_operations);
+ mysql_prlock_wrlock(&lock_operations);
mark_always_logged(thd);
if (new_len > sizeof(incl_user_buffer))
@@ -2819,7 +2848,7 @@ static void update_incl_users(MYSQL_THD thd,
error_header();
fprintf(stderr, "server_audit_incl_users set to '%s'.\n", incl_users);
if (!maria_55_started || !debug_server_started)
- flogger_mutex_unlock(&lock_operations);
+ mysql_prlock_unlock(&lock_operations);
}
@@ -2830,7 +2859,7 @@ static void update_excl_users(MYSQL_THD thd __attribute__((unused)),
char *new_users= (*(char **) save) ? *(char **) save : empty_str;
size_t new_len= strlen(new_users) + 1;
if (!maria_55_started || !debug_server_started)
- flogger_mutex_lock(&lock_operations);
+ mysql_prlock_wrlock(&lock_operations);
mark_always_logged(thd);
if (new_len > sizeof(excl_user_buffer))
@@ -2844,7 +2873,7 @@ static void update_excl_users(MYSQL_THD thd __attribute__((unused)),
error_header();
fprintf(stderr, "server_audit_excl_users set to '%s'.\n", excl_users);
if (!maria_55_started || !debug_server_started)
- flogger_mutex_unlock(&lock_operations);
+ mysql_prlock_unlock(&lock_operations);
}
@@ -2857,7 +2886,7 @@ static void update_output_type(MYSQL_THD thd,
return;
ADD_ATOMIC(internal_stop_logging, 1);
- flogger_mutex_lock(&lock_operations);
+ mysql_prlock_wrlock(&lock_operations);
if (logging)
{
log_current_query(thd);
@@ -2871,7 +2900,7 @@ static void update_output_type(MYSQL_THD thd,
if (logging)
start_logging();
- flogger_mutex_unlock(&lock_operations);
+ mysql_prlock_unlock(&lock_operations);
ADD_ATOMIC(internal_stop_logging, -1);
}
@@ -2901,9 +2930,9 @@ static void update_syslog_priority(MYSQL_THD thd __attribute__((unused)),
if (syslog_priority == new_priority)
return;
- flogger_mutex_lock(&lock_operations);
+ mysql_prlock_wrlock(&lock_operations);
mark_always_logged(thd);
- flogger_mutex_unlock(&lock_operations);
+ mysql_prlock_unlock(&lock_operations);
error_header();
fprintf(stderr, "SysLog priority was changed from '%s' to '%s'.\n",
syslog_priority_names[syslog_priority],
@@ -2922,7 +2951,7 @@ static void update_logging(MYSQL_THD thd,
ADD_ATOMIC(internal_stop_logging, 1);
if (!maria_55_started || !debug_server_started)
- flogger_mutex_lock(&lock_operations);
+ mysql_prlock_wrlock(&lock_operations);
if ((logging= new_logging))
{
start_logging();
@@ -2939,7 +2968,7 @@ static void update_logging(MYSQL_THD thd,
}
if (!maria_55_started || !debug_server_started)
- flogger_mutex_unlock(&lock_operations);
+ mysql_prlock_unlock(&lock_operations);
ADD_ATOMIC(internal_stop_logging, -1);
}
@@ -2954,13 +2983,13 @@ static void update_mode(MYSQL_THD thd __attribute__((unused)),
ADD_ATOMIC(internal_stop_logging, 1);
if (!maria_55_started || !debug_server_started)
- flogger_mutex_lock(&lock_operations);
+ mysql_prlock_wrlock(&lock_operations);
mark_always_logged(thd);
error_header();
fprintf(stderr, "Logging mode was changed from %d to %d.\n", mode, new_mode);
mode= new_mode;
if (!maria_55_started || !debug_server_started)
- flogger_mutex_unlock(&lock_operations);
+ mysql_prlock_unlock(&lock_operations);
ADD_ATOMIC(internal_stop_logging, -1);
}
@@ -2975,14 +3004,14 @@ static void update_syslog_ident(MYSQL_THD thd __attribute__((unused)),
syslog_ident= syslog_ident_buffer;
error_header();
fprintf(stderr, "SYSYLOG ident was changed to '%s'\n", syslog_ident);
- flogger_mutex_lock(&lock_operations);
+ mysql_prlock_wrlock(&lock_operations);
mark_always_logged(thd);
if (logging && output_type == OUTPUT_SYSLOG)
{
stop_logging();
start_logging();
}
- flogger_mutex_unlock(&lock_operations);
+ mysql_prlock_unlock(&lock_operations);
}
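
The server_audit.c hunks above switch the plugin's lock_operations from a plain mutex to an rwlock (mysql_prlock_*) and rework the handlers for its system variables. The patched update_logging(), update_incl_users() and update_excl_users() callbacks are what run when the corresponding plugin variables are changed at runtime; a rough usage sketch (not part of the patch, variable names as documented for the server_audit plugin):

SET GLOBAL server_audit_logging = ON;               -- handled by update_logging()
SET GLOBAL server_audit_incl_users = 'app_user';    -- handled by update_incl_users()
SET GLOBAL server_audit_excl_users = 'monitor';     -- handled by update_excl_users()
SHOW GLOBAL VARIABLES LIKE 'server_audit%';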
diff --git a/plugin/userstat/index_stats.cc b/plugin/userstat/index_stats.cc
index 0528507c50e..da0d4060b98 100644
--- a/plugin/userstat/index_stats.cc
+++ b/plugin/userstat/index_stats.cc
@@ -28,7 +28,7 @@ static int index_stats_fill(THD *thd, TABLE_LIST *tables, COND *cond)
tmp_table.grant.privilege= 0;
if (check_access(thd, SELECT_ACL, tmp_table.db.str,
&tmp_table.grant.privilege, NULL, 0, 1) ||
- check_grant(thd, SELECT_ACL, &tmp_table, 1, UINT_MAX, 1))
+ check_grant(thd, SELECT_ACL, &tmp_table, 1, 1, 1))
continue;
index_name= tmp_table.table_name.str + tmp_table.table_name.length + 1;
diff --git a/plugin/userstat/table_stats.cc b/plugin/userstat/table_stats.cc
index 3119e516e06..f3150d81fa8 100644
--- a/plugin/userstat/table_stats.cc
+++ b/plugin/userstat/table_stats.cc
@@ -33,8 +33,7 @@ static int table_stats_fill(THD *thd, TABLE_LIST *tables, COND *cond)
tmp_table.grant.privilege= 0;
if (check_access(thd, SELECT_ACL, tmp_table.db.str,
&tmp_table.grant.privilege, NULL, 0, 1) ||
- check_grant(thd, SELECT_ACL, &tmp_table, 1, UINT_MAX,
- 1))
+ check_grant(thd, SELECT_ACL, &tmp_table, 1, 1, 1))
continue;
table->field[0]->store(table_stats->table, schema_length,
diff --git a/scripts/CMakeLists.txt b/scripts/CMakeLists.txt
index eafb051ad60..192ca15c83e 100644
--- a/scripts/CMakeLists.txt
+++ b/scripts/CMakeLists.txt
@@ -99,7 +99,6 @@ INSTALL(FILES
${CMAKE_CURRENT_SOURCE_DIR}/mysql_test_db.sql
${CMAKE_CURRENT_SOURCE_DIR}/fill_help_tables.sql
${CMAKE_CURRENT_SOURCE_DIR}/mysql_test_data_timezone.sql
- ${CMAKE_CURRENT_SOURCE_DIR}/mysql_to_mariadb.sql
${CMAKE_CURRENT_BINARY_DIR}/maria_add_gis_sp.sql
${CMAKE_CURRENT_BINARY_DIR}/maria_add_gis_sp_bootstrap.sql
${FIX_PRIVILEGES_SQL}
diff --git a/scripts/mysql_system_tables.sql b/scripts/mysql_system_tables.sql
index d4d16c74c11..020385992ea 100644
--- a/scripts/mysql_system_tables.sql
+++ b/scripts/mysql_system_tables.sql
@@ -93,7 +93,7 @@ CREATE DEFINER='mariadb.sys'@'localhost' SQL SECURITY DEFINER VIEW IF NOT EXISTS
CAST(IFNULL(JSON_VALUE(Priv, '$.max_user_connections'), 0) AS SIGNED) AS max_user_connections,
IFNULL(JSON_VALUE(Priv, '$.plugin'), '') AS plugin,
IFNULL(JSON_VALUE(Priv, '$.authentication_string'), '') AS authentication_string,
- 'N' AS password_expired,
+ IF(IFNULL(JSON_VALUE(Priv, '$.password_last_changed'), 1) = 0, 'Y', 'N') AS password_expired,
ELT(IFNULL(JSON_VALUE(Priv, '$.is_role'), 0) + 1, 'N', 'Y') AS is_role,
IFNULL(JSON_VALUE(Priv, '$.default_role'), '') AS default_role,
CAST(IFNULL(JSON_VALUE(Priv, '$.max_statement_time'), 0.0) AS DECIMAL(12,6)) AS max_statement_time
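
With this change the mysql.user compatibility view derives password_expired from the password_last_changed attribute stored in the Priv JSON column: a stored value of 0 marks the password as expired, while a missing attribute keeps the previous 'N' behaviour. A minimal standalone sketch of the expression (the JSON literals below are made-up samples, not data from the patch):

SELECT IF(IFNULL(JSON_VALUE(Priv, '$.password_last_changed'), 1) = 0, 'Y', 'N')
         AS password_expired
  FROM (SELECT '{"password_last_changed": 0}' AS Priv
        UNION ALL
        SELECT '{"plugin": "mysql_native_password"}') AS samples;
-- first row: 'Y' (explicitly expired), second row: 'N' (attribute absent)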
diff --git a/scripts/mysql_system_tables_fix.sql b/scripts/mysql_system_tables_fix.sql
index 758229618db..9dd775aaf30 100644
--- a/scripts/mysql_system_tables_fix.sql
+++ b/scripts/mysql_system_tables_fix.sql
@@ -27,6 +27,7 @@
set sql_mode='';
set storage_engine=Aria;
set enforce_storage_engine=NULL;
+set alter_algorithm=DEFAULT;
set @have_innodb= (select count(engine) from information_schema.engines where engine='INNODB' and support != 'NO');
@@ -649,8 +650,7 @@ UPDATE user SET Delete_history_priv = Super_priv WHERE @had_user_delete_history_
ALTER TABLE user ADD plugin char(64) CHARACTER SET latin1 DEFAULT '' NOT NULL AFTER max_user_connections,
ADD authentication_string TEXT NOT NULL AFTER plugin;
ALTER TABLE user CHANGE auth_string authentication_string TEXT NOT NULL;
-ALTER TABLE user MODIFY plugin char(64) CHARACTER SET latin1 DEFAULT '' NOT NULL,
- MODIFY authentication_string TEXT NOT NULL;
+
ALTER TABLE user ADD password_expired ENUM('N', 'Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL AFTER authentication_string;
ALTER TABLE user ADD password_last_changed timestamp DEFAULT CURRENT_TIMESTAMP NOT NULL after password_expired;
ALTER TABLE user ADD password_lifetime smallint unsigned DEFAULT NULL after password_last_changed;
@@ -658,10 +658,25 @@ ALTER TABLE user ADD account_locked enum('N', 'Y') COLLATE utf8_general_ci DEFAU
ALTER TABLE user ADD is_role enum('N', 'Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL AFTER account_locked;
ALTER TABLE user ADD default_role char(80) binary DEFAULT '' NOT NULL AFTER is_role;
ALTER TABLE user ADD max_statement_time decimal(12,6) DEFAULT 0 NOT NULL AFTER default_role;
+
-- Somewhere above, we ran ALTER TABLE user .... CONVERT TO CHARACTER SET utf8 COLLATE utf8_bin.
--- we want password_expired column to have collation utf8_general_ci.
-ALTER TABLE user MODIFY password_expired ENUM('N', 'Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL;
-ALTER TABLE user MODIFY is_role enum('N', 'Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL;
+-- we want password_expired column to have collation utf8_general_ci.
+-- Correctly order the columns that were not ordered until MDEV-23201 (ff8ffef3e1915d7a9caa07d9461cd8d47c4baf98)
+
+ALTER TABLE user MODIFY plugin char(64) CHARACTER SET latin1 DEFAULT '' NOT NULL AFTER max_user_connections,
+ MODIFY authentication_string TEXT NOT NULL AFTER plugin,
+ MODIFY password_expired ENUM('N', 'Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL AFTER authentication_string,
+ MODIFY is_role enum('N', 'Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL AFTER password_expired,
+ MODIFY default_role char(80) binary DEFAULT '' NOT NULL AFTER is_role,
+ MODIFY max_statement_time decimal(12,6) DEFAULT 0 NOT NULL AFTER default_role,
+-- MDEV-24122: installations upgraded from MySQL 5.7 may still have the columns
+-- password_last_changed, password_lifetime and account_locked. Ensure they sit beyond the end
+-- of the user columns used by MariaDB; MariaDB 10.4 will use them when creating mysql.global_priv.
+-- password_last_changed keeps a DEFAULT/ON UPDATE of CURRENT_TIMESTAMP so the change time is
+-- still tracked until the 10.4 structures are added.
+ MODIFY IF EXISTS password_last_changed timestamp DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP AFTER max_statement_time,
+ MODIFY IF EXISTS password_lifetime smallint unsigned DEFAULT NULL AFTER password_last_changed,
+ MODIFY IF EXISTS account_locked enum('N', 'Y') CHARACTER SET utf8 DEFAULT 'N' NOT NULL after password_lifetime;
-- Checking for any duplicate hostname and username combination are exists.
-- If exits we will throw error.
@@ -809,7 +824,7 @@ IF 'BASE TABLE' = (select table_type from information_schema.tables where table_
DROP TABLE user;
END IF//
-IF 1 = (SELECT count(*) FROM information_schema.VIEWS WHERE TABLE_CATALOG = 'def' and TABLE_SCHEMA = 'mysql' and TABLE_NAME='user' and DEFINER = 'root@localhost') THEN
+IF 1 = (SELECT count(*) FROM information_schema.VIEWS WHERE TABLE_CATALOG = 'def' and TABLE_SCHEMA = 'mysql' and TABLE_NAME='user' and (DEFINER = 'root@localhost' or (DEFINER = 'mariadb.sys@localhost' and VIEW_DEFINITION LIKE "%'N' AS `password_expired`%"))) THEN
DROP VIEW IF EXISTS mysql.user;
END IF//
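
The upgrade script above now reorders the legacy MySQL 5.7 columns to the end of mysql.user and widens the check for which outdated mysql.user views get dropped. As a verification sketch (not part of the patch), the resulting column order on an upgraded server can be inspected with:

SELECT ORDINAL_POSITION, COLUMN_NAME
  FROM information_schema.COLUMNS
 WHERE TABLE_SCHEMA = 'mysql' AND TABLE_NAME = 'user'
 ORDER BY ORDINAL_POSITION;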
diff --git a/scripts/mysql_to_mariadb.sql b/scripts/mysql_to_mariadb.sql
deleted file mode 100644
index 4ee3f3a4214..00000000000
--- a/scripts/mysql_to_mariadb.sql
+++ /dev/null
@@ -1,22 +0,0 @@
--- Script that changes MySQL 5.7 privilege tables to MariaDB 10.x
--- This should be run first with
--- mysql --force mysql < mysql_to_mariadb.sql
--- It's ok to ignore any errors, as these usually means that the tables are
--- already fixed.
-
--- After this script s run, one should run at least:
--- mysql_upgrade --upgrade-system-tables
--- to get the other tables in the mysql database fixed.
-
--- Drop not existing columnms
-alter table mysql.user drop column `password_last_changed`, drop column `password_lifetime`, drop column `account_locked`;
-
--- Change existing columns
-alter table mysql.user change column `authentication_string` `auth_string` text COLLATE utf8_bin NOT NULL;
-
--- Add new columns
-alter table mysql.user add column `Password` char(41) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL DEFAULT '' after `user`, add column `is_role` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N' after `auth_string`;
-alter table mysql.user add column `default_role` char(80) COLLATE utf8_bin NOT NULL DEFAULT '', add column `max_statement_time` decimal(12,6) NOT NULL DEFAULT '0.000000';
-
--- Fix passwords
-update mysql.user set `password`=`auth_string`, plugin='' where plugin="mysql_native_password";
diff --git a/scripts/mysqld_multi.sh b/scripts/mysqld_multi.sh
index 0b33f61710e..77ed36218d3 100644
--- a/scripts/mysqld_multi.sh
+++ b/scripts/mysqld_multi.sh
@@ -308,7 +308,9 @@ sub report_mysqlds
sub start_mysqlds()
{
- my (@groups, $com, $tmp, $i, @options, $j, $mysqld_found, $info_sent);
+ my (@groups, $com, $tmp, $i, @options, $j, $mysqld_found, $suffix_found, $info_sent);
+
+ $suffix_found= 0;
if (!$opt_no_log)
{
@@ -327,6 +329,7 @@ sub start_mysqlds()
$mysqld_found= 1; # The default
$mysqld_found= 0 if (!length($mysqld));
$com= "$mysqld";
+
for ($j = 0, $tmp= ""; defined($options[$j]); $j++)
{
if ("--mysqladmin=" eq substr($options[$j], 0, 13))
@@ -347,6 +350,10 @@ sub start_mysqlds()
$options[$j]= quote_shell_word($options[$j]);
$tmp.= " $options[$j]";
}
+ elsif ("--defaults-group-suffix=" eq substr($options[$j], 0, 24))
+ {
+ $suffix_found= 1;
+ }
else
{
$options[$j]= quote_shell_word($options[$j]);
@@ -363,6 +370,12 @@ sub start_mysqlds()
$info_sent= 1;
}
+ if (!$suffix_found)
+ {
+ $com.= " --defaults-group-suffix=";
+ $com.= substr($groups[$i],6);
+ }
+
$com.= $tmp;
if ($opt_wsrep_new_cluster) {
diff --git a/scripts/mysqld_safe.sh b/scripts/mysqld_safe.sh
index c5857713ec9..21ceebb1413 100644
--- a/scripts/mysqld_safe.sh
+++ b/scripts/mysqld_safe.sh
@@ -126,7 +126,7 @@ log_generic () {
case $logging in
init) ;; # Just echo the message, don't save it anywhere
file)
- if [ -n "$helper" ]; then
+ if [ "$helper_exist" -eq "0" ]; then
echo "$msg" | "$helper" "$user" log "$err_log"
fi
;;
@@ -150,7 +150,7 @@ eval_log_error () {
local cmd="$1"
case $logging in
file)
- if [ -n "$helper" ]; then
+ if [ "$helper_exist" -eq "0" ]; then
cmd="$cmd 2>&1 | "`shell_quote_string "$helper"`" $user log "`shell_quote_string "$err_log"`
fi
;;
@@ -533,10 +533,9 @@ fi
helper=`find_in_bin mysqld_safe_helper`
print_defaults=`find_in_bin my_print_defaults`
-
# Check if helper exists
-$helper --help >/dev/null 2>&1 || helper=""
-
+command -v $helper --help >/dev/null 2>&1
+helper_exist=$?
#
# Second, try to find the data directory
#
@@ -943,7 +942,6 @@ fi
# Avoid 'nohup: ignoring input' warning
test -n "$NOHUP_NICENESS" && cmd="$cmd < /dev/null"
-
log_notice "Starting $MYSQLD daemon with databases from $DATADIR"
# variable to track the current number of "fast" (a.k.a. subsecond) restarts
diff --git a/scripts/wsrep_sst_common.sh b/scripts/wsrep_sst_common.sh
index 5e134570881..e7c825486cd 100644
--- a/scripts/wsrep_sst_common.sh
+++ b/scripts/wsrep_sst_common.sh
@@ -212,6 +212,9 @@ case "$1" in
"$option" != "--port" && \
"$option" != "--socket" ]]; then
value=${1#*=}
+ if [ "$value" == "$1" ]; then
+ value=""
+ fi
case "$option" in
'--innodb-data-home-dir')
if [ -z "$INNODB_DATA_HOME_DIR_ARG" ]; then
diff --git a/scripts/wsrep_sst_mariabackup.sh b/scripts/wsrep_sst_mariabackup.sh
index 181cd45cf44..2359cbc0ced 100644
--- a/scripts/wsrep_sst_mariabackup.sh
+++ b/scripts/wsrep_sst_mariabackup.sh
@@ -1,6 +1,6 @@
#!/bin/bash -ue
# Copyright (C) 2013 Percona Inc
-# Copyright (C) 2017-2020 MariaDB
+# Copyright (C) 2017-2021 MariaDB
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -851,7 +851,7 @@ then
-z $(parse_cnf --mysqld tmpdir "") && \
-z $(parse_cnf xtrabackup tmpdir "") ]]; then
xtmpdir=$(mktemp -d)
- tmpopts=" --tmpdir=$xtmpdir"
+ tmpopts="--tmpdir=$xtmpdir"
wsrep_log_info "Using $xtmpdir as xtrabackup temporary directory"
fi
@@ -864,10 +864,10 @@ then
fi
if [ -n "${WSREP_SST_OPT_PSWD:-}" ]; then
- INNOEXTRA+=" --password=$WSREP_SST_OPT_PSWD"
+ export MYSQL_PWD=$WSREP_SST_OPT_PSWD
elif [[ $usrst -eq 1 ]];then
- # Empty password, used for testing, debugging etc.
- INNOEXTRA+=" --password="
+ # Empty password, used for testing, debugging etc.
+ unset MYSQL_PWD
fi
check_extra
diff --git a/sql/compat56.h b/sql/compat56.h
index 347d6145048..65cd36dacfd 100644
--- a/sql/compat56.h
+++ b/sql/compat56.h
@@ -30,8 +30,8 @@
#define MY_PACKED_TIME_GET_INT_PART(x) ((x) >> 24)
#define MY_PACKED_TIME_GET_FRAC_PART(x) ((x) % (1LL << 24))
-#define MY_PACKED_TIME_MAKE(i, f) ((((longlong) (i)) << 24) + (f))
-#define MY_PACKED_TIME_MAKE_INT(i) ((((longlong) (i)) << 24))
+#define MY_PACKED_TIME_MAKE(i, f) ((((ulonglong) (i)) << 24) + (f))
+#define MY_PACKED_TIME_MAKE_INT(i) ((((ulonglong) (i)) << 24))
longlong TIME_to_longlong_datetime_packed(const MYSQL_TIME *);
longlong TIME_to_longlong_time_packed(const MYSQL_TIME *);
diff --git a/sql/contributors.h b/sql/contributors.h
index 34f06087c8c..e16448ee985 100644
--- a/sql/contributors.h
+++ b/sql/contributors.h
@@ -37,22 +37,17 @@ struct show_table_contributors_st {
struct show_table_contributors_st show_table_contributors[]= {
/* MariaDB foundation sponsors, in contribution, size , time order */
- {"Booking.com", "https://www.booking.com", "Founding member, Platinum Sponsor of the MariaDB Foundation"},
{"Alibaba Cloud", "https://www.alibabacloud.com/", "Platinum Sponsor of the MariaDB Foundation"},
{"Tencent Cloud", "https://cloud.tencent.com", "Platinum Sponsor of the MariaDB Foundation"},
{"Microsoft", "https://microsoft.com/", "Platinum Sponsor of the MariaDB Foundation"},
{"MariaDB Corporation", "https://mariadb.com", "Founding member, Platinum Sponsor of the MariaDB Foundation"},
+ {"ServiceNow", "https://servicenow.com", "Platinum Sponsor of the MariaDB Foundation"},
{"Visma", "https://visma.com", "Gold Sponsor of the MariaDB Foundation"},
{"DBS", "https://dbs.com", "Gold Sponsor of the MariaDB Foundation"},
{"IBM", "https://www.ibm.com", "Gold Sponsor of the MariaDB Foundation"},
- {"Tencent Games", "http://game.qq.com/", "Gold Sponsor of the MariaDB Foundation"},
- {"Nexedi", "https://www.nexedi.com", "Silver Sponsor of the MariaDB Foundation"},
- {"Acronis", "https://www.acronis.com", "Silver Sponsor of the MariaDB Foundation"},
- {"Verkkokauppa.com", "https://www.verkkokauppa.com", "Bronze Sponsor of the MariaDB Foundation"},
- {"Virtuozzo", "https://virtuozzo.com", "Bronze Sponsor of the MariaDB Foundation"},
- {"Tencent Game DBA", "http://tencentdba.com/about", "Bronze Sponsor of the MariaDB Foundation"},
- {"Tencent TDSQL", "http://tdsql.org", "Bronze Sponsor of the MariaDB Foundation"},
- {"Percona", "https://www.percona.com/", "Bronze Sponsor of the MariaDB Foundation"},
+ {"Automattic", "https://automattic.com", "Silver Sponsor of the MariaDB Foundation"},
+ {"Percona", "https://www.percona.com/", "Sponsor of the MariaDB Foundation"},
+ {"Galera Cluster", "https://galeracluster.com", "Sponsor of the MariaDB Foundation"},
/* Sponsors of important features */
{"Google", "USA", "Sponsoring encryption, parallel replication and GTID"},
diff --git a/sql/create_options.cc b/sql/create_options.cc
index a8d997efaf4..066adcd92e3 100644
--- a/sql/create_options.cc
+++ b/sql/create_options.cc
@@ -98,14 +98,13 @@ static bool report_unknown_option(THD *thd, engine_option_value *val,
{
DBUG_ENTER("report_unknown_option");
- if (val->parsed || suppress_warning)
+ if (val->parsed || suppress_warning || thd->slave_thread)
{
DBUG_PRINT("info", ("parsed => exiting"));
DBUG_RETURN(FALSE);
}
- if (!(thd->variables.sql_mode & MODE_IGNORE_BAD_TABLE_OPTIONS) &&
- !thd->slave_thread)
+ if (!(thd->variables.sql_mode & MODE_IGNORE_BAD_TABLE_OPTIONS))
{
my_error(ER_UNKNOWN_OPTION, MYF(0), val->name.str);
DBUG_RETURN(TRUE);
diff --git a/sql/event_data_objects.cc b/sql/event_data_objects.cc
index 70faeee6039..bc6d4587064 100644
--- a/sql/event_data_objects.cc
+++ b/sql/event_data_objects.cc
@@ -180,6 +180,7 @@ Event_queue_element_for_exec::init(const LEX_CSTRING *db, const LEX_CSTRING *n)
if (!(name.str= my_strndup(n->str, name.length= n->length, MYF(MY_WME))))
{
my_free(const_cast<char*>(dbname.str));
+ dbname.str= NULL;
return TRUE;
}
return FALSE;
diff --git a/sql/event_data_objects.h b/sql/event_data_objects.h
index e5e3e4eb087..eb4f8590512 100644
--- a/sql/event_data_objects.h
+++ b/sql/event_data_objects.h
@@ -33,7 +33,7 @@ struct TABLE;
class Event_queue_element_for_exec
{
public:
- Event_queue_element_for_exec(){};
+ Event_queue_element_for_exec() : dbname{nullptr, 0}, name{nullptr, 0} {}
~Event_queue_element_for_exec();
bool
diff --git a/sql/event_queue.cc b/sql/event_queue.cc
index 91c243b3f70..96a5e93cfe6 100644
--- a/sql/event_queue.cc
+++ b/sql/event_queue.cc
@@ -639,6 +639,7 @@ Event_queue::get_top_for_execution_if_time(THD *thd,
if (!(*event_name= new Event_queue_element_for_exec()) ||
(*event_name)->init(&top->dbname, &top->name))
{
+ delete *event_name;
ret= TRUE;
break;
}
diff --git a/sql/events.cc b/sql/events.cc
index 195c0fa09e2..5e15b92dc49 100644
--- a/sql/events.cc
+++ b/sql/events.cc
@@ -658,8 +658,16 @@ Events::drop_schema_events(THD *thd, const char *db)
*/
if (event_queue)
event_queue->drop_schema_events(thd, &db_lex);
- db_repository->drop_schema_events(thd, &db_lex);
-
+ if (db_repository)
+ db_repository->drop_schema_events(thd, &db_lex);
+ else
+ {
+ if ((db_repository= new Event_db_repository))
+ {
+ db_repository->drop_schema_events(thd, &db_lex);
+ delete db_repository;
+ }
+ }
DBUG_VOID_RETURN;
}
diff --git a/sql/field.cc b/sql/field.cc
index 5a694b50fe0..89c51288de8 100644
--- a/sql/field.cc
+++ b/sql/field.cc
@@ -8098,11 +8098,10 @@ uint Field_varstring::get_key_image(uchar *buff, uint length,
{
String val;
uint local_char_length;
- my_bitmap_map *old_map;
- old_map= dbug_tmp_use_all_columns(table, table->read_set);
+ MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->read_set);
val_str(&val, &val);
- dbug_tmp_restore_column_map(table->read_set, old_map);
+ dbug_tmp_restore_column_map(&table->read_set, old_map);
local_char_length= val.charpos(length / field_charset->mbmaxlen);
if (local_char_length < val.length())
@@ -8494,7 +8493,7 @@ int Field_blob::store(const char *from,size_t length,CHARSET_INFO *cs)
DBUG_ASSERT(length <= max_data_length());
new_length= length;
- copy_length= (size_t)MY_MIN(UINT_MAX,table->in_use->variables.group_concat_max_len);
+ copy_length= table->in_use->variables.group_concat_max_len;
if (new_length > copy_length)
{
new_length= Well_formed_prefix(cs,
@@ -11077,16 +11076,26 @@ Column_definition::Column_definition(THD *thd, Field *old_field,
CREATE TABLE t1 (a INT) AS SELECT a FROM t2;
See Type_handler::Column_definition_redefine_stage1()
for data type specific code.
+
+ @param this - The field definition corresponding to the expression
+ in the "AS SELECT.." part.
+
+ @param dup_field - The field definition from the "CREATE TABLE (...)" part.
+ It has already undergone prepare_stage1(), so it
+ must be fully initialized:
+ -- dup_field->charset is set and BINARY
+ sorting style is applied, see find_bin_collation().
+
+ @param file - The table handler
*/
void
Column_definition::redefine_stage1_common(const Column_definition *dup_field,
- const handler *file,
- const Schema_specification_st *schema)
+ const handler *file)
{
set_handler(dup_field->type_handler());
default_value= dup_field->default_value;
- charset= dup_field->charset ? dup_field->charset :
- schema->default_table_charset;
+ DBUG_ASSERT(dup_field->charset); // Set by prepare_stage1()
+ charset= dup_field->charset;
length= dup_field->char_length;
pack_length= dup_field->pack_length;
key_length= dup_field->key_length;
@@ -11396,7 +11405,7 @@ key_map Field::get_possible_keys()
bool Field::validate_value_in_record_with_warn(THD *thd, const uchar *record)
{
- my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
+ MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->read_set);
bool rc;
if ((rc= validate_value_in_record(thd, record)))
{
@@ -11408,7 +11417,7 @@ bool Field::validate_value_in_record_with_warn(THD *thd, const uchar *record)
ER_THD(thd, ER_INVALID_DEFAULT_VALUE_FOR_FIELD),
ErrConvString(&tmp).ptr(), field_name.str);
}
- dbug_tmp_restore_column_map(table->read_set, old_map);
+ dbug_tmp_restore_column_map(&table->read_set, old_map);
return rc;
}
diff --git a/sql/field.h b/sql/field.h
index a512d74b444..421d6bddb2d 100644
--- a/sql/field.h
+++ b/sql/field.h
@@ -1394,8 +1394,6 @@ public:
virtual uint max_packed_col_length(uint max_length)
{ return max_length;}
- virtual bool is_packable() const { return false; }
-
uint offset(const uchar *record) const
{
return (uint) (ptr - record);
@@ -1872,7 +1870,7 @@ public:
uchar null_bit_arg, utype unireg_check_arg,
const LEX_CSTRING *field_name_arg,
const DTCollation &collation);
- uint decimals() const { return NOT_FIXED_DEC; }
+ uint decimals() const { return is_created_from_null_item ? 0 : NOT_FIXED_DEC; }
int save_in_field(Field *to) { return save_in_field_str(to); }
bool memcpy_field_possible(const Field *from) const
{
@@ -1988,7 +1986,6 @@ public:
bool can_optimize_range(const Item_bool_func *cond,
const Item *item,
bool is_eq_func) const;
- bool is_packable() const { return true; }
};
/* base class for float and double and decimal (old one) */
@@ -4537,7 +4534,13 @@ public:
void move_field_offset(my_ptrdiff_t ptr_diff)
{
Field::move_field_offset(ptr_diff);
- bit_ptr= ADD_TO_PTR(bit_ptr, ptr_diff, uchar*);
+
+ /*
+ clang does not like when things are added to a null pointer, even if
+ it is never referenced.
+ */
+ if (bit_ptr)
+ bit_ptr= ADD_TO_PTR(bit_ptr, ptr_diff, uchar*);
}
void hash(ulong *nr, ulong *nr2);
@@ -4634,6 +4637,11 @@ public:
void frm_pack_charset(uchar *buff) const;
void frm_unpack_basic(const uchar *buff);
bool frm_unpack_charset(TABLE_SHARE *share, const uchar *buff);
+ CHARSET_INFO *explicit_or_derived_charset(const Column_derived_attributes
+ *derived_attr) const
+ {
+ return charset ? charset : derived_attr->charset();
+ }
};
@@ -4768,6 +4776,15 @@ public:
void create_length_to_internal_length_bit();
void create_length_to_internal_length_newdecimal();
+ /*
+ Prepare the "charset" member for string data types,
+ such as CHAR, VARCHAR, TEXT, ENUM, SET:
+ - derive the charset if not specified explicitly
+ - find a _bin collation if the BINARY comparison style was specified, e.g.:
+ CREATE TABLE t1 (a VARCHAR(10) BINARY) CHARSET utf8;
+ */
+ bool prepare_charset_for_string(const Column_derived_attributes *dattr);
+
/**
Prepare a SET/ENUM field.
Create "interval" from "interval_list" if needed, and adjust "length".
@@ -4803,7 +4820,13 @@ public:
bool sp_prepare_create_field(THD *thd, MEM_ROOT *mem_root);
bool prepare_stage1(THD *thd, MEM_ROOT *mem_root,
- handler *file, ulonglong table_flags);
+ handler *file, ulonglong table_flags,
+ const Column_derived_attributes *derived_attr);
+ void prepare_stage1_simple(CHARSET_INFO *cs)
+ {
+ charset= cs;
+ create_length_to_internal_length_simple();
+ }
bool prepare_stage1_typelib(THD *thd, MEM_ROOT *mem_root,
handler *file, ulonglong table_flags);
bool prepare_stage1_string(THD *thd, MEM_ROOT *mem_root,
@@ -4811,15 +4834,19 @@ public:
bool prepare_stage1_bit(THD *thd, MEM_ROOT *mem_root,
handler *file, ulonglong table_flags);
+ bool bulk_alter(const Column_derived_attributes *derived_attr,
+ const Column_bulk_alter_attributes *bulk_attr)
+ {
+ return type_handler()->Column_definition_bulk_alter(this,
+ derived_attr,
+ bulk_attr);
+ }
void redefine_stage1_common(const Column_definition *dup_field,
- const handler *file,
- const Schema_specification_st *schema);
- bool redefine_stage1(const Column_definition *dup_field, const handler *file,
- const Schema_specification_st *schema)
+ const handler *file);
+ bool redefine_stage1(const Column_definition *dup_field, const handler *file)
{
const Type_handler *handler= dup_field->type_handler();
- return handler->Column_definition_redefine_stage1(this, dup_field,
- file, schema);
+ return handler->Column_definition_redefine_stage1(this, dup_field, file);
}
bool prepare_stage2(handler *handler, ulonglong table_flags);
bool prepare_stage2_blob(handler *handler,
diff --git a/sql/filesort.cc b/sql/filesort.cc
index ad4cb2b6e6b..aa25474be1a 100644
--- a/sql/filesort.cc
+++ b/sql/filesort.cc
@@ -1978,14 +1978,7 @@ sortlength(THD *thd, SORT_FIELD *sortorder, uint s_length,
if (sortorder->field)
{
CHARSET_INFO *cs= sortorder->field->sort_charset();
- sortorder->type= sortorder->field->is_packable() ?
- SORT_FIELD_ATTR::VARIABLE_SIZE :
- SORT_FIELD_ATTR::FIXED_SIZE;
-
sortorder->length= sortorder->field->sort_length();
- if (sortorder->is_variable_sized())
- set_if_smaller(sortorder->length, thd->variables.max_sort_length);
-
if (use_strnxfrm((cs=sortorder->field->sort_charset())))
{
*multi_byte_charset= true;
@@ -1996,10 +1989,6 @@ sortlength(THD *thd, SORT_FIELD *sortorder, uint s_length,
}
else
{
- sortorder->type= sortorder->item->type_handler()->is_packable() ?
- SORT_FIELD_ATTR::VARIABLE_SIZE :
- SORT_FIELD_ATTR::FIXED_SIZE;
-
sortorder->item->type_handler()->sortlength(thd, sortorder->item,
sortorder);
if (use_strnxfrm(sortorder->item->collation.collation))
@@ -2009,8 +1998,7 @@ sortlength(THD *thd, SORT_FIELD *sortorder, uint s_length,
if (sortorder->item->maybe_null)
length++; // Place for NULL marker
}
- if (sortorder->is_variable_sized())
- set_if_smaller(sortorder->length, thd->variables.max_sort_length);
+ set_if_smaller(sortorder->length, thd->variables.max_sort_length);
length+=sortorder->length;
}
sortorder->field= (Field*) 0; // end marker
diff --git a/sql/gcalc_tools.h b/sql/gcalc_tools.h
index 77da791f0b9..e625b355d95 100644
--- a/sql/gcalc_tools.h
+++ b/sql/gcalc_tools.h
@@ -184,7 +184,11 @@ class Gcalc_result_receiver
double first_x, first_y, prev_x, prev_y;
double shape_area;
public:
- Gcalc_result_receiver() : collection_result(FALSE), n_shapes(0), n_holes(0)
+Gcalc_result_receiver() :
+ n_points(0),
+ common_shapetype(Gcalc_function::shape_point),
+ collection_result(FALSE), n_shapes(0), n_holes(0),
+ cur_shape(Gcalc_function::shape_point), shape_pos(0)
{}
int start_shape(Gcalc_function::shape_type shape);
int add_point(double x, double y);
diff --git a/sql/gen_win_tzname_data.ps1 b/sql/gen_win_tzname_data.ps1
index c0a37d21895..474ab889d25 100644
--- a/sql/gen_win_tzname_data.ps1
+++ b/sql/gen_win_tzname_data.ps1
@@ -4,6 +4,7 @@
write-output "/* This file was generated using gen_win_tzname_data.ps1 */"
$xdoc = new-object System.Xml.XmlDocument
+[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12
$xdoc.load("https://raw.githubusercontent.com/unicode-org/cldr/master/common/supplemental/windowsZones.xml")
$nodes = $xdoc.SelectNodes("//mapZone[@territory='001']") # use default territory (001)
foreach ($node in $nodes) {
diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc
index 609e2135a99..24e08501bed 100644
--- a/sql/ha_partition.cc
+++ b/sql/ha_partition.cc
@@ -1,6 +1,6 @@
/*
Copyright (c) 2005, 2019, Oracle and/or its affiliates.
- Copyright (c) 2009, 2020, MariaDB
+ Copyright (c) 2009, 2021, MariaDB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -4282,7 +4282,7 @@ int ha_partition::write_row(const uchar * buf)
int error;
longlong func_value;
bool have_auto_increment= table->next_number_field && buf == table->record[0];
- my_bitmap_map *old_map;
+ MY_BITMAP *old_map;
THD *thd= ha_thd();
sql_mode_t saved_sql_mode= thd->variables.sql_mode;
bool saved_auto_inc_field_not_null= table->auto_increment_field_not_null;
@@ -4324,9 +4324,9 @@ int ha_partition::write_row(const uchar * buf)
}
}
- old_map= dbug_tmp_use_all_columns(table, table->read_set);
+ old_map= dbug_tmp_use_all_columns(table, &table->read_set);
error= m_part_info->get_partition_id(m_part_info, &part_id, &func_value);
- dbug_tmp_restore_column_map(table->read_set, old_map);
+ dbug_tmp_restore_column_map(&table->read_set, old_map);
if (unlikely(error))
{
m_part_info->err_value= func_value;
@@ -6461,6 +6461,7 @@ int ha_partition::multi_range_read_init(RANGE_SEQ_IF *seq,
DBUG_ENTER("ha_partition::multi_range_read_init");
DBUG_PRINT("enter", ("partition this: %p", this));
+ eq_range= 0;
m_seq_if= seq;
m_seq= seq->init(seq_init_param, n_ranges, mrr_mode);
if (unlikely((error= multi_range_key_create_key(seq, m_seq))))
@@ -9060,7 +9061,6 @@ int ha_partition::extra(enum ha_extra_function operation)
case HA_EXTRA_STARTING_ORDERED_INDEX_SCAN:
case HA_EXTRA_BEGIN_ALTER_COPY:
case HA_EXTRA_END_ALTER_COPY:
- case HA_EXTRA_FAKE_START_STMT:
DBUG_RETURN(loop_partitions(extra_cb, &operation));
default:
{
@@ -11326,13 +11326,12 @@ int ha_partition::bulk_update_row(const uchar *old_data, const uchar *new_data,
int error= 0;
uint32 part_id;
longlong func_value;
- my_bitmap_map *old_map;
DBUG_ENTER("ha_partition::bulk_update_row");
- old_map= dbug_tmp_use_all_columns(table, table->read_set);
+ MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->read_set);
error= m_part_info->get_partition_id(m_part_info, &part_id,
&func_value);
- dbug_tmp_restore_column_map(table->read_set, old_map);
+ dbug_tmp_restore_column_map(&table->read_set, old_map);
if (unlikely(error))
{
m_part_info->err_value= func_value;
diff --git a/sql/handler.cc b/sql/handler.cc
index ed12830ce20..ac5b43249db 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -859,6 +859,7 @@ static my_bool kill_handlerton(THD *thd, plugin_ref plugin,
{
handlerton *hton= plugin_hton(plugin);
+ mysql_mutex_assert_owner(&thd->LOCK_thd_data);
if (hton->state == SHOW_OPTION_YES && hton->kill_query &&
thd_get_ha_data(thd, hton))
hton->kill_query(hton, thd, *(enum thd_kill_levels *) level);
@@ -3322,6 +3323,13 @@ int handler::update_auto_increment()
(table->auto_increment_field_not_null &&
thd->variables.sql_mode & MODE_NO_AUTO_VALUE_ON_ZERO))
{
+
+ /*
+ There could be an error reported because the value was truncated
+ when strict mode is enabled.
+ */
+ if (thd->is_error())
+ DBUG_RETURN(HA_ERR_AUTOINC_ERANGE);
/*
Update next_insert_id if we had already generated a value in this
statement (case of INSERT VALUES(null),(3763),(null):
@@ -3335,25 +3343,27 @@ int handler::update_auto_increment()
DBUG_RETURN(0);
}
- // ALTER TABLE ... ADD COLUMN ... AUTO_INCREMENT
- if (thd->lex->sql_command == SQLCOM_ALTER_TABLE)
+ if (table->versioned())
{
- if (table->versioned())
+ Field *end= table->vers_end_field();
+ DBUG_ASSERT(end);
+ bitmap_set_bit(table->read_set, end->field_index);
+ if (!end->is_max())
{
- Field *end= table->vers_end_field();
- DBUG_ASSERT(end);
- bitmap_set_bit(table->read_set, end->field_index);
- if (!end->is_max())
+ if (thd->lex->sql_command == SQLCOM_ALTER_TABLE)
{
if (!table->next_number_field->real_maybe_null())
DBUG_RETURN(HA_ERR_UNSUPPORTED);
table->next_number_field->set_null();
- DBUG_RETURN(0);
}
+ DBUG_RETURN(0);
}
- table->next_number_field->set_notnull();
}
+ // ALTER TABLE ... ADD COLUMN ... AUTO_INCREMENT
+ if (thd->lex->sql_command == SQLCOM_ALTER_TABLE)
+ table->next_number_field->set_notnull();
+
if ((nr= next_insert_id) >= auto_inc_interval_for_cur_row.maximum())
{
/* next_insert_id is beyond what is reserved, so we reserve more. */
@@ -5204,6 +5214,7 @@ int ha_create_table(THD *thd, const char *path,
char name_buff[FN_REFLEN];
const char *name;
TABLE_SHARE share;
+ Abort_on_warning_instant_set old_abort_on_warning(thd, 0);
bool temp_table __attribute__((unused)) =
create_info->options & (HA_LEX_CREATE_TMP_TABLE | HA_CREATE_TMP_ALTER);
DBUG_ENTER("ha_create_table");
@@ -7605,6 +7616,11 @@ bool Vers_parse_info::fix_alter_info(THD *thd, Alter_info *alter_info,
{
if (f->flags & VERS_SYSTEM_FIELD)
{
+ if (!table->versioned())
+ {
+ my_error(ER_VERS_NOT_VERSIONED, MYF(0), table->s->table_name.str);
+ return true;
+ }
my_error(ER_VERS_DUPLICATE_ROW_START_END, MYF(0),
f->flags & VERS_SYS_START_FLAG ? "START" : "END", f->field_name.str);
return true;
diff --git a/sql/handler.h b/sql/handler.h
index 0c8be2154a9..891187db171 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -2,7 +2,7 @@
#define HANDLER_INCLUDED
/*
Copyright (c) 2000, 2019, Oracle and/or its affiliates.
- Copyright (c) 2009, 2020, MariaDB
+ Copyright (c) 2009, 2021, MariaDB
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
@@ -1507,7 +1507,7 @@ struct handlerton
enum handler_create_iterator_result
(*create_iterator)(handlerton *hton, enum handler_iterator_type type,
struct handler_iterator *fill_this_in);
- int (*abort_transaction)(handlerton *hton, THD *bf_thd,
+ void (*abort_transaction)(handlerton *hton, THD *bf_thd,
THD *victim_thd, my_bool signal);
int (*set_checkpoint)(handlerton *hton, const XID* xid);
int (*get_checkpoint)(handlerton *hton, XID* xid);
@@ -1725,6 +1725,12 @@ handlerton *ha_default_tmp_handlerton(THD *thd);
/* can be replicated by wsrep replication provider plugin */
#define HTON_WSREP_REPLICATION (1 << 13)
+/*
+ Table requires a close and reopen after truncate
+ If the handler has HTON_CAN_RECREATE, this flag is not used
+*/
+#define HTON_REQUIRES_CLOSE_AFTER_TRUNCATE (1 << 18)
+
class Ha_trx_info;
struct THD_TRANS
@@ -2086,7 +2092,7 @@ public:
struct Table_scope_and_contents_source_pod_st // For trivial members
{
- CHARSET_INFO *table_charset;
+ CHARSET_INFO *alter_table_convert_to_charset;
LEX_CUSTRING tabledef_version;
LEX_CSTRING connect_string;
LEX_CSTRING comment;
@@ -2231,7 +2237,7 @@ struct HA_CREATE_INFO: public Table_scope_and_contents_source_st,
DBUG_ASSERT(cs);
if (check_conflicting_charset_declarations(cs))
return true;
- table_charset= default_table_charset= cs;
+ alter_table_convert_to_charset= default_table_charset= cs;
used_fields|= (HA_CREATE_USED_CHARSET | HA_CREATE_USED_DEFAULT_CHARSET);
return false;
}
@@ -3191,7 +3197,7 @@ public:
{
cached_table_flags= table_flags();
}
- /* ha_ methods: pubilc wrappers for private virtual API */
+ /* ha_ methods: public wrappers for private virtual API */
int ha_open(TABLE *table, const char *name, int mode, uint test_if_locked,
MEM_ROOT *mem_root= 0, List<String> *partitions_to_open=NULL);
diff --git a/sql/item.cc b/sql/item.cc
index f700bcfe680..6c52ade0d9f 100644
--- a/sql/item.cc
+++ b/sql/item.cc
@@ -1,6 +1,6 @@
/*
Copyright (c) 2000, 2018, Oracle and/or its affiliates.
- Copyright (c) 2010, 2020, MariaDB Corporation.
+ Copyright (c) 2010, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -1423,9 +1423,9 @@ int Item::save_in_field_no_warnings(Field *field, bool no_conversions)
Sql_mode_save sql_mode(thd);
thd->variables.sql_mode&= ~(MODE_NO_ZERO_IN_DATE | MODE_NO_ZERO_DATE);
thd->variables.sql_mode|= MODE_INVALID_DATES;
- my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set);
+ MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->write_set);
res= save_in_field(field, no_conversions);
- dbug_tmp_restore_column_map(table->write_set, old_map);
+ dbug_tmp_restore_column_map(&table->write_set, old_map);
return res;
}
@@ -2599,9 +2599,7 @@ Item_sp::Item_sp(THD *thd, Name_resolution_context *context_arg,
dummy_table= (TABLE*) thd->calloc(sizeof(TABLE) + sizeof(TABLE_SHARE) +
sizeof(Query_arena));
dummy_table->s= (TABLE_SHARE*) (dummy_table + 1);
- /* TODO(cvicentiu) Move this sp_query_arena in the class as a direct member.
- Currently it can not be done due to header include dependencies. */
- sp_query_arena= (Query_arena *) (dummy_table->s + 1);
+ sp_query_arena= new(dummy_table->s + 1) Query_arena();
memset(&sp_mem_root, 0, sizeof(sp_mem_root));
}
@@ -2612,7 +2610,7 @@ Item_sp::Item_sp(THD *thd, Item_sp *item):
dummy_table= (TABLE*) thd->calloc(sizeof(TABLE)+ sizeof(TABLE_SHARE) +
sizeof(Query_arena));
dummy_table->s= (TABLE_SHARE*) (dummy_table+1);
- sp_query_arena= (Query_arena *) (dummy_table->s + 1);
+ sp_query_arena= new(dummy_table->s + 1) Query_arena();
memset(&sp_mem_root, 0, sizeof(sp_mem_root));
}
@@ -4978,13 +4976,19 @@ bool Item_ref_null_helper::get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuz
@param resolved_item item which was resolved in outer SELECT(for warning)
@param mark_item item which should be marked (can be differ in case of
substitution)
+ @param suppress_warning_output flag specifying whether to suppress output of
+ a warning message
*/
static bool mark_as_dependent(THD *thd, SELECT_LEX *last, SELECT_LEX *current,
Item_ident *resolved_item,
- Item_ident *mark_item)
+ Item_ident *mark_item,
+ bool suppress_warning_output)
{
DBUG_ENTER("mark_as_dependent");
+ DBUG_PRINT("info", ("current select: %d (%p) last: %d (%p)",
+ current->select_number, current,
+ (last ? last->select_number : 0), last));
/* store pointer on SELECT_LEX from which item is dependent */
if (mark_item && mark_item->can_be_depended)
@@ -4995,7 +4999,7 @@ static bool mark_as_dependent(THD *thd, SELECT_LEX *last, SELECT_LEX *current,
if (current->mark_as_dependent(thd, last,
/** resolved_item psergey-thu **/ mark_item))
DBUG_RETURN(TRUE);
- if (thd->lex->describe & DESCRIBE_EXTENDED)
+ if ((thd->lex->describe & DESCRIBE_EXTENDED) && !suppress_warning_output)
{
const char *db_name= (resolved_item->db_name ?
resolved_item->db_name : "");
@@ -5024,6 +5028,8 @@ static bool mark_as_dependent(THD *thd, SELECT_LEX *last, SELECT_LEX *current,
@param found_item Item which was found during resolving (if resolved
identifier belongs to VIEW)
@param resolved_item Identifier which was resolved
+ @param suppress_warning_output flag specifying whether to suppress output of
+ a warning message
@note
We have to mark all items between current_sel (including) and
@@ -5037,7 +5043,8 @@ void mark_select_range_as_dependent(THD *thd,
SELECT_LEX *last_select,
SELECT_LEX *current_sel,
Field *found_field, Item *found_item,
- Item_ident *resolved_item)
+ Item_ident *resolved_item,
+ bool suppress_warning_output)
{
/*
Go from current SELECT to SELECT where field was resolved (it
@@ -5072,7 +5079,7 @@ void mark_select_range_as_dependent(THD *thd,
found_field->table->map;
prev_subselect_item->const_item_cache= 0;
mark_as_dependent(thd, last_select, current_sel, resolved_item,
- dependent);
+ dependent, suppress_warning_output);
}
}
@@ -5422,8 +5429,9 @@ Item_field::fix_outer_field(THD *thd, Field **from_field, Item **reference)
Name_resolution_context *outer_context= 0;
SELECT_LEX *select= 0;
/* Currently derived tables cannot be correlated */
- if (current_sel->master_unit()->first_select()->get_linkage() !=
- DERIVED_TABLE_TYPE)
+ if ((current_sel->master_unit()->first_select()->get_linkage() !=
+ DERIVED_TABLE_TYPE) &&
+ current_sel->master_unit()->outer_select())
outer_context= context->outer_context;
/*
@@ -5538,7 +5546,7 @@ Item_field::fix_outer_field(THD *thd, Field **from_field, Item **reference)
context->select_lex, this,
((ref_type == REF_ITEM ||
ref_type == FIELD_ITEM) ?
- (Item_ident*) (*reference) : 0));
+ (Item_ident*) (*reference) : 0), false);
return 0;
}
}
@@ -5550,7 +5558,7 @@ Item_field::fix_outer_field(THD *thd, Field **from_field, Item **reference)
context->select_lex, this,
((ref_type == REF_ITEM || ref_type == FIELD_ITEM) ?
(Item_ident*) (*reference) :
- 0));
+ 0), false);
if (thd->lex->in_sum_func &&
thd->lex->in_sum_func->nest_level >= select->nest_level)
{
@@ -5664,7 +5672,7 @@ Item_field::fix_outer_field(THD *thd, Field **from_field, Item **reference)
set_max_sum_func_level(thd, select);
mark_as_dependent(thd, last_checked_context->select_lex,
context->select_lex, rf,
- rf);
+ rf, false);
return 0;
}
@@ -5677,7 +5685,7 @@ Item_field::fix_outer_field(THD *thd, Field **from_field, Item **reference)
set_max_sum_func_level(thd, select);
mark_as_dependent(thd, last_checked_context->select_lex,
context->select_lex,
- this, (Item_ident*)*reference);
+ this, (Item_ident*)*reference, false);
if (last_checked_context->select_lex->having_fix_field)
{
Item_ref *rf;
@@ -5753,7 +5761,7 @@ bool Item_field::fix_fields(THD *thd, Item **reference)
Field *from_field= (Field *)not_found_field;
bool outer_fixed= false;
SELECT_LEX *select= thd->lex->current_select;
-
+
if (select && select->in_tvc)
{
my_error(ER_FIELD_REFERENCE_IN_TVC, MYF(0), full_name());
@@ -6198,12 +6206,14 @@ Item *Item_field::replace_equal_field(THD *thd, uchar *arg)
item_equal->compare_type_handler()->cmp_type());
return const_item2;
}
- Item_field *subst=
- (Item_field *)(item_equal->get_first(param->context_tab, this));
+ Item_ident *subst=
+ (Item_ident *) (item_equal->get_first(param->context_tab, this));
if (subst)
- subst= (Item_field *) (subst->real_item());
- if (subst && !field->eq(subst->field))
- return subst;
+ {
+ Item_field *subst2= (Item_field *) (subst->real_item());
+ if (subst2 && !field->eq(subst2->field))
+ return subst2;
+ }
}
return this;
}
@@ -6652,7 +6662,7 @@ Item *Item_string::make_odbc_literal(THD *thd, const LEX_CSTRING *typestr)
}
-static int save_int_value_in_field (Field *field, longlong nr,
+static int save_int_value_in_field (Field *field, longlong nr,
bool null_value, bool unsigned_flag)
{
if (null_value)
@@ -7648,7 +7658,7 @@ public:
if (tbl->table == item->field->table)
{
if (sel != current_select)
- mark_as_dependent(thd, sel, current_select, item, item);
+ mark_as_dependent(thd, sel, current_select, item, item, false);
return;
}
}
@@ -7844,7 +7854,7 @@ bool Item_ref::fix_fields(THD *thd, Item **reference)
((refer_type == REF_ITEM ||
refer_type == FIELD_ITEM) ?
(Item_ident*) (*reference) :
- 0));
+ 0), false);
/*
view reference found, we substituted it instead of this
Item, so can quit
@@ -7894,7 +7904,7 @@ bool Item_ref::fix_fields(THD *thd, Item **reference)
goto error;
thd->change_item_tree(reference, fld);
mark_as_dependent(thd, last_checked_context->select_lex,
- current_sel, fld, fld);
+ current_sel, fld, fld, false);
/*
A reference is resolved to a nest level that's outer or the same as
the nest level of the enclosing set function : adjust the value of
@@ -7917,7 +7927,7 @@ bool Item_ref::fix_fields(THD *thd, Item **reference)
/* Should be checked in resolve_ref_in_select_and_group(). */
DBUG_ASSERT(*ref && (*ref)->is_fixed());
mark_as_dependent(thd, last_checked_context->select_lex,
- context->select_lex, this, this);
+ context->select_lex, this, this, false);
/*
A reference is resolved to a nest level that's outer or the same as
the nest level of the enclosing set function : adjust the value of
@@ -8427,6 +8437,22 @@ bool Item_direct_ref::val_native(THD *thd, Native *to)
}
+longlong Item_direct_ref::val_time_packed(THD *thd)
+{
+ longlong tmp = (*ref)->val_time_packed(thd);
+ null_value= (*ref)->null_value;
+ return tmp;
+}
+
+
+longlong Item_direct_ref::val_datetime_packed(THD *thd)
+{
+ longlong tmp = (*ref)->val_datetime_packed(thd);
+ null_value= (*ref)->null_value;
+ return tmp;
+}
+
+
Item_cache_wrapper::~Item_cache_wrapper()
{
DBUG_ASSERT(expr_cache == 0);
@@ -9279,8 +9305,9 @@ bool Item_default_value::fix_fields(THD *thd, Item **items)
memcpy((void *)def_field, (void *)field_arg->field,
field_arg->field->size_of());
def_field->reset_fields();
- // If non-constant default value expression
- if (def_field->default_value && def_field->default_value->flags)
+ // If non-constant default value expression or a blob
+ if (def_field->default_value &&
+ (def_field->default_value->flags || (def_field->flags & BLOB_FLAG)))
{
uchar *newptr= (uchar*) thd->alloc(1+def_field->pack_length());
if (!newptr)
@@ -9383,11 +9410,60 @@ int Item_default_value::save_in_field(Field *field_arg, bool no_conversions)
return Item_field::save_in_field(field_arg, no_conversions);
}
+double Item_default_value::val_result()
+{
+ calculate();
+ return Item_field::val_result();
+}
+
+longlong Item_default_value::val_int_result()
+{
+ calculate();
+ return Item_field::val_int_result();
+}
+
+String *Item_default_value::str_result(String* tmp)
+{
+ calculate();
+ return Item_field::str_result(tmp);
+}
+
+bool Item_default_value::val_bool_result()
+{
+ calculate();
+ return Item_field::val_bool_result();
+}
+
+bool Item_default_value::is_null_result()
+{
+ calculate();
+ return Item_field::is_null_result();
+}
+
+my_decimal *Item_default_value::val_decimal_result(my_decimal *decimal_value)
+{
+ calculate();
+ return Item_field::val_decimal_result(decimal_value);
+}
+
+bool Item_default_value::get_date_result(THD *thd, MYSQL_TIME *ltime,
+ date_mode_t fuzzydate)
+{
+ calculate();
+ return Item_field::get_date_result(thd, ltime, fuzzydate);
+}
+
+bool Item_default_value::val_native_result(THD *thd, Native *to)
+{
+ calculate();
+ return Item_field::val_native_result(thd, to);
+}
+
table_map Item_default_value::used_tables() const
{
if (!field || !field->default_value)
return static_cast<table_map>(0);
- if (!field->default_value->expr) // not fully parsed field
+ if (!field->default_value->expr) // not fully parsed field
return static_cast<table_map>(RAND_TABLE_BIT);
return field->default_value->expr->used_tables();
}
@@ -9763,7 +9839,7 @@ bool Item_cache_int::cache_value()
return FALSE;
value_cached= TRUE;
value= example->val_int_result();
- null_value= example->null_value;
+ null_value_inside= null_value= example->null_value;
unsigned_flag= example->unsigned_flag;
return TRUE;
}
@@ -9840,7 +9916,7 @@ bool Item_cache_temporal::cache_value()
return false;
value_cached= true;
value= example->val_datetime_packed_result(current_thd);
- null_value= example->null_value;
+ null_value_inside= null_value= example->null_value;
return true;
}
@@ -9851,7 +9927,7 @@ bool Item_cache_time::cache_value()
return false;
value_cached= true;
value= example->val_time_packed_result(current_thd);
- null_value= example->null_value;
+ null_value_inside= null_value= example->null_value;
return true;
}
@@ -9993,7 +10069,7 @@ bool Item_cache_real::cache_value()
return FALSE;
value_cached= TRUE;
value= example->val_result();
- null_value= example->null_value;
+ null_value_inside= null_value= example->null_value;
return TRUE;
}
@@ -10060,7 +10136,8 @@ bool Item_cache_decimal::cache_value()
return FALSE;
value_cached= TRUE;
my_decimal *val= example->val_decimal_result(&decimal_value);
- if (!(null_value= example->null_value) && val != &decimal_value)
+ if (!(null_value_inside= null_value= example->null_value) &&
+ val != &decimal_value)
my_decimal2decimal(val, &decimal_value);
return TRUE;
}
@@ -10109,11 +10186,14 @@ Item *Item_cache_decimal::convert_to_basic_const_item(THD *thd)
bool Item_cache_str::cache_value()
{
if (!example)
+ {
+ DBUG_ASSERT(value_cached == FALSE);
return FALSE;
+ }
value_cached= TRUE;
value_buff.set(buffer, sizeof(buffer), example->collation.collation);
value= example->str_result(&value_buff);
- if ((null_value= example->null_value))
+ if ((null_value= null_value_inside= example->null_value))
value= 0;
else if (value != &value_buff)
{
@@ -10208,6 +10288,8 @@ Item *Item_cache_str::convert_to_basic_const_item(THD *thd)
bool Item_cache_row::setup(THD *thd, Item *item)
{
example= item;
+ null_value= true;
+
if (!values && allocate(thd, item->cols()))
return 1;
for (uint i= 0; i < item_count; i++)
@@ -10240,12 +10322,19 @@ bool Item_cache_row::cache_value()
if (!example)
return FALSE;
value_cached= TRUE;
- null_value= 0;
+ null_value= TRUE;
+ null_value_inside= false;
example->bring_value();
+
+ /*
+ For Item_cache_row null_value is set to TRUE only when ALL the values
+ inside the cache are NULL
+ */
for (uint i= 0; i < item_count; i++)
{
values[i]->cache_value();
- null_value|= values[i]->null_value;
+ null_value&= values[i]->null_value;
+ null_value_inside|= values[i]->null_value;
}
return TRUE;
}
diff --git a/sql/item.h b/sql/item.h
index e2e18ce9b86..dbd2cb0b1df 100644
--- a/sql/item.h
+++ b/sql/item.h
@@ -2,7 +2,7 @@
#define SQL_ITEM_INCLUDED
/* Copyright (c) 2000, 2017, Oracle and/or its affiliates.
- Copyright (c) 2009, 2020, MariaDB Corporation.
+ Copyright (c) 2009, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -1938,6 +1938,15 @@ public:
return 0;
}
+ /**
+ Check db/table_name if they defined in item and match arg values
+
+ @param arg Pointer to Check_table_name_prm structure
+
+ @retval true Match failed
+ @retval false Match succeeded
+ */
+ virtual bool check_table_name_processor(void *arg) { return false; }
/*
TRUE if the expression depends only on the table indicated by tab_map
or can be converted to such an exression using equalities.
@@ -2096,6 +2105,15 @@ public:
bool collect;
};
+ struct Check_table_name_prm
+ {
+ LEX_CSTRING db;
+ LEX_CSTRING table_name;
+ String field;
+ Check_table_name_prm(LEX_CSTRING _db, LEX_CSTRING _table_name) :
+ db(_db), table_name(_table_name) {}
+ };
+
/*
For SP local variable returns pointer to Item representing its
current value and pointer to current Item otherwise.
@@ -3500,6 +3518,24 @@ public:
}
return 0;
}
+ bool check_table_name_processor(void *arg)
+ {
+ Check_table_name_prm &p= *(Check_table_name_prm *) arg;
+ if (p.table_name.length && table_name)
+ {
+ DBUG_ASSERT(p.db.length);
+ if ((db_name &&
+ my_strcasecmp(table_alias_charset, p.db.str, db_name)) ||
+ my_strcasecmp(table_alias_charset, p.table_name.str, table_name))
+ {
+ print(&p.field, (enum_query_type) (QT_ITEM_ORIGINAL_FUNC_NULLIF |
+ QT_NO_DATA_EXPANSION |
+ QT_TO_SYSTEM_CHARSET));
+ return true;
+ }
+ }
+ return false;
+ }
void cleanup();
Item_equal *get_item_equal() { return item_equal; }
void set_item_equal(Item_equal *item_eq) { item_equal= item_eq; }
@@ -5569,14 +5605,17 @@ public:
return Item_ref::fix_fields(thd, it);
}
void save_val(Field *to);
+ /* Below we should have all val() methods as in Item_ref */
double val_real();
longlong val_int();
- String *val_str(String* tmp);
- bool val_native(THD *thd, Native *to);
my_decimal *val_decimal(my_decimal *);
bool val_bool();
+ String *val_str(String* tmp);
+ bool val_native(THD *thd, Native *to);
bool is_null();
bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate);
+ longlong val_datetime_packed(THD *);
+ longlong val_time_packed(THD *);
virtual Ref_Type ref_type() { return DIRECT_REF; }
Item *get_copy(THD *thd)
{ return get_item_copy<Item_direct_ref>(thd, this); }
@@ -5905,6 +5944,20 @@ public:
}
return Item_direct_ref::get_date(thd, ltime, fuzzydate);
}
+ longlong val_time_packed(THD *thd)
+ {
+ if (check_null_ref())
+ return 0;
+ else
+ return Item_direct_ref::val_time_packed(thd);
+ }
+ longlong val_datetime_packed(THD *thd)
+ {
+ if (check_null_ref())
+ return 0;
+ else
+ return Item_direct_ref::val_datetime_packed(thd);
+ }
bool send(Protocol *protocol, st_value *buffer);
void save_org_in_field(Field *field,
fast_field_copier data __attribute__ ((__unused__)))
@@ -6405,6 +6458,17 @@ public:
my_decimal *val_decimal(my_decimal *decimal_value);
bool get_date(THD *thd, MYSQL_TIME *ltime,date_mode_t fuzzydate);
bool val_native(THD *thd, Native *to);
+ bool val_native_result(THD *thd, Native *to);
+
+ /* Result variants */
+ double val_result();
+ longlong val_int_result();
+ String *str_result(String* tmp);
+ my_decimal *val_decimal_result(my_decimal *val);
+ bool val_bool_result();
+ bool is_null_result();
+ bool get_date_result(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate);
+
bool send(Protocol *protocol, st_value *buffer);
int save_in_field(Field *field_arg, bool no_conversions);
bool save_in_param(THD *thd, Item_param *param)
@@ -6433,6 +6497,8 @@ public:
}
Item *transform(THD *thd, Item_transformer transformer, uchar *args);
+ Field *create_tmp_field_ex(TABLE *table, Tmp_field_src *src,
+ const Tmp_field_param *param);
};
@@ -6721,6 +6787,14 @@ protected:
table_map used_table_map;
public:
+ /*
+ This is set if at least one of the values of a subquery is NULL.
+ Item_cache_row returns this with null_inside().
+ For non-row items, it's set to the value of null_value.
+ It is set after cache_value() is called.
+ */
+ bool null_value_inside;
+
Item_cache(THD *thd):
Item(thd),
Type_handler_hybrid_field_type(&type_handler_string),
@@ -6730,6 +6804,7 @@ public:
{
maybe_null= 1;
null_value= 1;
+ null_value_inside= true;
}
protected:
Item_cache(THD *thd, const Type_handler *handler):
@@ -6741,6 +6816,7 @@ protected:
{
maybe_null= 1;
null_value= 1;
+ null_value_inside= true;
}
public:
@@ -7360,7 +7436,8 @@ void mark_select_range_as_dependent(THD *thd,
st_select_lex *last_select,
st_select_lex *current_sel,
Field *found_field, Item *found_item,
- Item_ident *resolved_item);
+ Item_ident *resolved_item,
+ bool suppress_warning_output);
extern Cached_item *new_Cached_item(THD *thd, Item *item,
bool pass_through_ref);
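
The new null_value_inside member above distinguishes "the cached value is NULL" from "at least one component of a cached row is NULL"; the Item_in_optimizer and Item_in_subselect hunks further down switch to it. A minimal sketch of the intended semantics, using hypothetical simplified classes rather than the real Item_cache hierarchy:

#include <vector>
#include <optional>
#include <cassert>

// Hypothetical stand-ins for Item_cache / Item_cache_row semantics.
struct ScalarCache
{
  bool null_value= false;
  bool null_value_inside= false;       // mirrors null_value for non-row items
  void cache_value(const std::optional<long long> &v)
  {
    null_value= !v.has_value();
    null_value_inside= null_value;     // scalar: same flag
  }
};

struct RowCache
{
  bool null_value= false;              // all components NULL
  bool null_value_inside= false;       // at least one component NULL
  void cache_value(const std::vector<std::optional<long long>> &row)
  {
    null_value= true;
    null_value_inside= false;
    for (const auto &v : row)
    {
      if (v.has_value())
        null_value= false;
      else
        null_value_inside= true;
    }
  }
};

int main()
{
  ScalarCache s;
  s.cache_value(std::nullopt);
  assert(s.null_value && s.null_value_inside);
  RowCache c;
  c.cache_value({1, std::nullopt, 3});
  assert(!c.null_value && c.null_value_inside);  // partial NULL row
  return 0;
}
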
diff --git a/sql/item_buff.cc b/sql/item_buff.cc
index 9c96fdb1a9a..05cef6871be 100644
--- a/sql/item_buff.cc
+++ b/sql/item_buff.cc
@@ -47,9 +47,9 @@ Cached_item *new_Cached_item(THD *thd, Item *item, bool pass_through_ref)
}
switch (item->result_type()) {
case STRING_RESULT:
- return new Cached_item_str(thd, (Item_field *) item);
+ return new Cached_item_str(thd, item);
case INT_RESULT:
- return new Cached_item_int((Item_field *) item);
+ return new Cached_item_int(item);
case REAL_RESULT:
return new Cached_item_real(item);
case DECIMAL_RESULT:
diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc
index c43dbcaab78..687aa7e192b 100644
--- a/sql/item_cmpfunc.cc
+++ b/sql/item_cmpfunc.cc
@@ -321,13 +321,13 @@ static bool convert_const_to_int(THD *thd, Item_field *field_item,
TABLE *table= field->table;
Sql_mode_save sql_mode(thd);
Check_level_instant_set check_level_save(thd, CHECK_FIELD_IGNORE);
- my_bitmap_map *old_maps[2] = { NULL, NULL };
+ MY_BITMAP *old_maps[2] = { NULL, NULL };
ulonglong UNINIT_VAR(orig_field_val); /* original field value if valid */
/* table->read_set may not be set if we come here from a CREATE TABLE */
if (table && table->read_set)
dbug_tmp_use_all_columns(table, old_maps,
- table->read_set, table->write_set);
+ &table->read_set, &table->write_set);
/* For comparison purposes allow invalid dates like 2000-01-32 */
thd->variables.sql_mode= (thd->variables.sql_mode & ~MODE_NO_ZERO_DATE) |
MODE_INVALID_DATES;
@@ -368,7 +368,7 @@ static bool convert_const_to_int(THD *thd, Item_field *field_item,
DBUG_ASSERT(!result);
}
if (table && table->read_set)
- dbug_tmp_restore_column_maps(table->read_set, table->write_set, old_maps);
+ dbug_tmp_restore_column_maps(&table->read_set, &table->write_set, old_maps);
}
return result;
}
@@ -1198,9 +1198,9 @@ longlong Item_func_truth::val_int()
bool Item_in_optimizer::is_top_level_item()
{
- if (invisible_mode())
- return FALSE;
- return ((Item_in_subselect *)args[1])->is_top_level_item();
+ if (!invisible_mode())
+ return ((Item_in_subselect *)args[1])->is_top_level_item();
+ return false;
}
@@ -1565,7 +1565,7 @@ longlong Item_in_optimizer::val_int()
DBUG_RETURN(res);
}
- if (cache->null_value)
+ if (cache->null_value_inside)
{
DBUG_PRINT("info", ("Left NULL..."));
/*
@@ -3196,7 +3196,7 @@ bool Item_func_decode_oracle::fix_length_and_dec()
/*
Aggregate all THEN and ELSE expression types
and collations when string result
-
+
@param THD - current thd
@param start - an element in args to start aggregating from
*/
@@ -5473,6 +5473,7 @@ void Item_func_like::print(String *str, enum_query_type query_type)
longlong Item_func_like::val_int()
{
DBUG_ASSERT(fixed == 1);
+ DBUG_ASSERT(escape != -1);
String* res= args[0]->val_str(&cmp_value1);
if (args[0]->null_value)
{
@@ -5559,15 +5560,29 @@ bool fix_escape_item(THD *thd, Item *escape_item, String *tmp_str,
bool escape_used_in_parsing, CHARSET_INFO *cmp_cs,
int *escape)
{
- if (!escape_item->const_during_execution())
+ /*
+ ESCAPE clause accepts only constant arguments and Item_param.
+
+ Subqueries during context_analysis_only might decide they're
+ const_during_execution, but not quite const yet, not evaluate-able.
+ This is fine, as most of context_analysis_only modes will never
+ reach val_int(), so we won't need the value.
+ CONTEXT_ANALYSIS_ONLY_DERIVED being a notable exception here.
+ */
+ if (!escape_item->const_during_execution() ||
+ (!escape_item->const_item() &&
+ !(thd->lex->context_analysis_only & ~CONTEXT_ANALYSIS_ONLY_DERIVED)))
{
my_error(ER_WRONG_ARGUMENTS,MYF(0),"ESCAPE");
return TRUE;
}
-
+
+ IF_DBUG(*escape= -1,);
+
if (escape_item->const_item())
{
/* If we are on execution stage */
+ /* XXX is it safe to evaluate is_expensive() items here? */
String *escape_str= escape_item->val_str(tmp_str);
if (escape_str)
{
@@ -5642,13 +5657,17 @@ bool Item_func_like::fix_fields(THD *thd, Item **ref)
if (!res2)
return FALSE; // Null argument
- const size_t len = res2->length();
- const char* first = res2->ptr();
- const char* last = first + len - 1;
+ const size_t len= res2->length();
+
/*
len must be > 2 ('%pattern%')
heuristic: only do TurboBM for pattern_len > 2
*/
+ if (len <= 2)
+ return FALSE;
+
+ const char* first= res2->ptr();
+ const char* last= first + len - 1;
if (len > MIN_TURBOBM_PATTERN_LEN + 2 &&
*first == wild_many &&
diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h
index ab8e15372ad..d77da3fa48c 100644
--- a/sql/item_cmpfunc.h
+++ b/sql/item_cmpfunc.h
@@ -832,11 +832,7 @@ class Item_func_ne :public Item_bool_rowready_func2
{
protected:
SEL_TREE *get_func_mm_tree(RANGE_OPT_PARAM *param,
- Field *field, Item *value)
- {
- DBUG_ENTER("Item_func_ne::get_func_mm_tree");
- DBUG_RETURN(get_ne_mm_tree(param, field, value, value));
- }
+ Field *field, Item *value);
public:
Item_func_ne(THD *thd, Item *a, Item *b):
Item_bool_rowready_func2(thd, a, b) {}
@@ -2787,6 +2783,13 @@ public:
return this;
}
+ bool walk(Item_processor processor, bool walk_subquery, void *arg)
+ {
+ return walk_args(processor, walk_subquery, arg)
+ || escape_item->walk(processor, walk_subquery, arg)
+ || (this->*processor)(arg);
+ }
+
bool find_selective_predicates_list_processor(void *arg);
Item *get_copy(THD *thd)
diff --git a/sql/item_create.cc b/sql/item_create.cc
index 5f6e7b29d5c..a80781259ca 100644
--- a/sql/item_create.cc
+++ b/sql/item_create.cc
@@ -1,6 +1,6 @@
/*
Copyright (c) 2000, 2011, Oracle and/or its affiliates.
- Copyright (c) 2008, 2020, MariaDB Corporation.
+ Copyright (c) 2008, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -109,7 +109,6 @@ public:
@return An item representing the function call
*/
virtual Item *create_2_arg(THD *thd, Item *arg1, Item *arg2) = 0;
-
protected:
/** Constructor. */
Create_func_arg2() {}
@@ -136,7 +135,6 @@ public:
@return An item representing the function call
*/
virtual Item *create_3_arg(THD *thd, Item *arg1, Item *arg2, Item *arg3) = 0;
-
protected:
/** Constructor. */
Create_func_arg3() {}
@@ -908,6 +906,19 @@ class Create_func_distance : public Create_func_arg2
Create_func_distance() {}
virtual ~Create_func_distance() {}
};
+
+
+class Create_func_distance_sphere: public Create_native_func
+{
+ public:
+ Item *create_native(THD *thd, LEX_CSTRING *name, List<Item> *item_list);
+ static Create_func_distance_sphere s_singleton;
+
+ protected:
+ Create_func_distance_sphere() {}
+ virtual ~Create_func_distance_sphere() {}
+};
+
#endif
@@ -4878,6 +4889,26 @@ Create_func_glength::create_1_arg(THD *thd, Item *arg1)
{
return new (thd->mem_root) Item_func_glength(thd, arg1);
}
+
+
+Create_func_distance_sphere Create_func_distance_sphere::s_singleton;
+
+Item*
+Create_func_distance_sphere::create_native(THD *thd, LEX_CSTRING *name,
+ List<Item> *item_list)
+{
+ int arg_count= 0;
+
+ if (item_list != NULL)
+ arg_count= item_list->elements;
+
+ if (arg_count < 2)
+ {
+ my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name->str);
+ return NULL;
+ }
+ return new (thd->mem_root) Item_func_sphere_distance(thd, *item_list);
+}
#endif
@@ -7457,6 +7488,7 @@ static Native_func_registry func_array[] =
{ { STRING_WITH_LEN("ST_WITHIN") }, GEOM_BUILDER(Create_func_within)},
{ { STRING_WITH_LEN("ST_X") }, GEOM_BUILDER(Create_func_x)},
{ { STRING_WITH_LEN("ST_Y") }, GEOM_BUILDER(Create_func_y)},
+ { { C_STRING_WITH_LEN("ST_DISTANCE_SPHERE") }, GEOM_BUILDER(Create_func_distance_sphere)},
{ { STRING_WITH_LEN("SUBSTR_ORACLE") },
BUILDER(Create_func_substr_oracle)},
{ { STRING_WITH_LEN("SUBSTRING_INDEX") }, BUILDER(Create_func_substr_index)},
diff --git a/sql/item_func.cc b/sql/item_func.cc
index 837156ce39a..d50fb22a154 100644
--- a/sql/item_func.cc
+++ b/sql/item_func.cc
@@ -1095,17 +1095,20 @@ double Item_func_plus::real_op()
return check_float_overflow(value);
}
+#if defined(__powerpc64__) && GCC_VERSION >= 6003 && GCC_VERSION <= 10002
+#pragma GCC push_options
+#pragma GCC optimize ("no-expensive-optimizations")
+#endif
longlong Item_func_plus::int_op()
{
longlong val0= args[0]->val_int();
longlong val1= args[1]->val_int();
- longlong res= val0 + val1;
bool res_unsigned= FALSE;
+ longlong res;
if ((null_value= args[0]->null_value || args[1]->null_value))
return 0;
-
/*
First check whether the result can be represented as a
(bool unsigned_flag, longlong value) pair, then check if it is compatible
@@ -1146,16 +1149,29 @@ longlong Item_func_plus::int_op()
{
if (val0 >=0 && val1 >= 0)
res_unsigned= TRUE;
- else if (val0 < 0 && val1 < 0 && res >= 0)
+ else if (val0 < 0 && val1 < 0 && val0 < (LONGLONG_MIN - val1))
goto err;
}
}
+
+#ifndef WITH_UBSAN
+ res= val0 + val1;
+#else
+ if (res_unsigned)
+ res= (longlong) ((ulonglong) val0 + (ulonglong) val1);
+ else
+ res= val0+val1;
+#endif /* WITH_UBSAN */
+
return check_integer_overflow(res, res_unsigned);
err:
return raise_integer_overflow();
}
+#if defined(__powerpc64__) && GCC_VERSION >= 6003 && GCC_VERSION <= 10002
+#pragma GCC pop_options
+#endif
/**
Calculate plus of two decimals.
@@ -1248,12 +1264,17 @@ double Item_func_minus::real_op()
}
+#if defined(__powerpc64__) && GCC_VERSION >= 6003 && GCC_VERSION <= 10002
+#pragma GCC push_options
+#pragma GCC optimize ("no-expensive-optimizations")
+#endif
+
longlong Item_func_minus::int_op()
{
longlong val0= args[0]->val_int();
longlong val1= args[1]->val_int();
- longlong res= val0 - val1;
bool res_unsigned= FALSE;
+ longlong res;
if ((null_value= args[0]->null_value || args[1]->null_value))
return 0;
@@ -1268,12 +1289,8 @@ longlong Item_func_minus::int_op()
if (args[1]->unsigned_flag)
{
if ((ulonglong) val0 < (ulonglong) val1)
- {
- if (res >= 0)
- goto err;
- }
- else
- res_unsigned= TRUE;
+ goto err;
+ res_unsigned= TRUE;
}
else
{
@@ -1294,23 +1311,35 @@ longlong Item_func_minus::int_op()
{
if (args[1]->unsigned_flag)
{
- if ((ulonglong) (val0 - LONGLONG_MIN) < (ulonglong) val1)
+ if (((ulonglong) val0 - (ulonglong) LONGLONG_MIN) < (ulonglong) val1)
goto err;
}
else
{
if (val0 > 0 && val1 < 0)
res_unsigned= TRUE;
- else if (val0 < 0 && val1 > 0 && res >= 0)
+ else if (val0 < 0 && val1 > 0 && val0 < (LONGLONG_MIN + val1))
goto err;
}
}
+#ifndef WITH_UBSAN
+ res= val0 - val1;
+#else
+ if (res_unsigned)
+ res= (longlong) ((ulonglong) val0 - (ulonglong) val1);
+ else
+ res= val0 - val1;
+#endif /* WITH_UBSAN */
+
return check_integer_overflow(res, res_unsigned);
err:
return raise_integer_overflow();
}
+#if defined(__powerpc64__) && GCC_VERSION >= 6003 && GCC_VERSION <= 10002
+#pragma GCC pop_options
+#endif
/**
See Item_func_plus::decimal_op for comments.
@@ -2130,31 +2159,29 @@ double Item_func_cot::val_real()
longlong Item_func_shift_left::val_int()
{
DBUG_ASSERT(fixed == 1);
- uint shift;
- ulonglong res= ((ulonglong) args[0]->val_int() <<
- (shift=(uint) args[1]->val_int()));
+ uint shift= (uint) args[1]->val_int();
+ ulonglong value= args[0]->val_int();
if (args[0]->null_value || args[1]->null_value)
{
null_value=1;
return 0;
}
null_value=0;
- return (shift < sizeof(longlong)*8 ? (longlong) res : 0);
+ return (shift < sizeof(longlong)*8 ? (value << shift) : 0);
}
longlong Item_func_shift_right::val_int()
{
DBUG_ASSERT(fixed == 1);
- uint shift;
- ulonglong res= (ulonglong) args[0]->val_int() >>
- (shift=(uint) args[1]->val_int());
+ uint shift= (uint) args[1]->val_int();
+ ulonglong value= args[0]->val_int();
if (args[0]->null_value || args[1]->null_value)
{
null_value=1;
return 0;
}
null_value=0;
- return (shift < sizeof(longlong)*8 ? (longlong) res : 0);
+ return (shift < sizeof(longlong)*8 ? (value >> shift) : 0);
}
@@ -3054,10 +3081,11 @@ longlong Item_func_locate::val_int()
if (arg_count == 3)
{
- start0= start= args[2]->val_int() - 1;
+ start0= start= args[2]->val_int();
- if ((start < 0) || (start > a->length()))
+ if ((start <= 0) || (start > a->length()))
return 0;
+ start0--; start--;
/* start is now sufficiently valid to pass to charpos function */
start= a->charpos((int) start);
@@ -3222,7 +3250,7 @@ bool Item_func_find_in_set::fix_length_and_dec()
find->length(), 0);
enum_bit=0;
if (enum_value)
- enum_bit=1LL << (enum_value-1);
+ enum_bit= 1ULL << (enum_value-1);
}
}
}
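
The Item_func_plus/Item_func_minus::int_op() hunks above decide overflow from the operands before forming the result and, under WITH_UBSAN, route the arithmetic through unsigned types so no signed overflow is ever executed. A self-contained sketch of that pattern (illustrative names and checks, not the server code):

#include <climits>
#include <stdexcept>
#include <cassert>

// Overflow-safe signed addition: detect overflow from the operands first,
// then do the arithmetic in unsigned space (well-defined, wraps) and cast
// back, as in the WITH_UBSAN branch of Item_func_plus::int_op().
long long checked_add(long long a, long long b)
{
  if ((b > 0 && a > LLONG_MAX - b) ||
      (b < 0 && a < LLONG_MIN - b))
    throw std::overflow_error("BIGINT value is out of range");
  return (long long) ((unsigned long long) a + (unsigned long long) b);
}

long long checked_sub(long long a, long long b)
{
  if ((b < 0 && a > LLONG_MAX + b) ||
      (b > 0 && a < LLONG_MIN + b))
    throw std::overflow_error("BIGINT value is out of range");
  return (long long) ((unsigned long long) a - (unsigned long long) b);
}

int main()
{
  assert(checked_add(LLONG_MAX - 1, 1) == LLONG_MAX);
  assert(checked_sub(LLONG_MIN + 1, 1) == LLONG_MIN);
  return 0;
}
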
diff --git a/sql/item_geofunc.cc b/sql/item_geofunc.cc
index 682051f2448..ef7ccf1dad6 100644
--- a/sql/item_geofunc.cc
+++ b/sql/item_geofunc.cc
@@ -136,7 +136,7 @@ String *Item_func_geometry_from_json::val_str(String *str)
{
String *sv= args[1]->val_str(&tmp_js);
my_error(ER_WRONG_VALUE_FOR_TYPE, MYF(0),
- "option", sv->c_ptr_safe(), "ST_GeometryFromJSON");
+ "option", sv->c_ptr_safe(), "ST_GeomFromGeoJSON");
null_value= 1;
return 0;
}
@@ -173,7 +173,7 @@ String *Item_func_geometry_from_json::val_str(String *str)
code= ER_GEOJSON_NOT_CLOSED;
break;
case Geometry::GEOJ_DIMENSION_NOT_SUPPORTED:
- my_error(ER_GIS_INVALID_DATA, MYF(0), "ST_GeometryFromJSON");
+ my_error(ER_GIS_INVALID_DATA, MYF(0), "ST_GeomFromGeoJSON");
break;
default:
report_json_error_ex(js, &je, func_name(), 0, Sql_condition::WARN_LEVEL_WARN);
@@ -2528,11 +2528,151 @@ mem_error:
}
+double Item_func_sphere_distance::val_real()
+{
+ /* To test null_value of the items, first get the well-known bytes as backups */
+ String bak1, bak2;
+ String *arg1= args[0]->val_str(&bak1);
+ String *arg2= args[1]->val_str(&bak2);
+ double distance= 0.0;
+ double sphere_radius= 6370986.0; // Default radius equals Earth radius
+
+ null_value= (args[0]->null_value || args[1]->null_value);
+ if (null_value)
+ {
+ return 0;
+ }
+
+ if (arg_count == 3)
+ {
+ sphere_radius= args[2]->val_real();
+ // Radius cannot be Null
+ if (args[2]->null_value)
+ {
+ null_value= true;
+ return 0;
+ }
+ if (sphere_radius <= 0)
+ {
+ my_error(ER_INTERNAL_ERROR, MYF(0), "Radius must be greater than zero.");
+ return 1;
+ }
+ }
+ Geometry_buffer buffer1, buffer2;
+ Geometry *g1, *g2;
+ if (!(g1= Geometry::construct(&buffer1, arg1->ptr(), arg1->length())) ||
+ !(g2= Geometry::construct(&buffer2, arg2->ptr(), arg2->length())))
+ {
+ my_error(ER_GIS_INVALID_DATA, MYF(0), "ST_Distance_Sphere");
+ goto handle_errors;
+ }
+// Method allowed for points and multipoints
+ if (!(g1->get_class_info()->m_type_id == Geometry::wkb_point ||
+ g1->get_class_info()->m_type_id == Geometry::wkb_multipoint) ||
+ !(g2->get_class_info()->m_type_id == Geometry::wkb_point ||
+ g2->get_class_info()->m_type_id == Geometry::wkb_multipoint))
+ {
+ // Generate error message in case different geometry is used?
+ my_error(ER_INTERNAL_ERROR, MYF(0), func_name());
+ return 0;
+ }
+ distance= spherical_distance_points(g1, g2, sphere_radius);
+ if (distance < 0)
+ {
+ my_error(ER_INTERNAL_ERROR, MYF(0), "Returned distance cannot be negative.");
+ return 1;
+ }
+ return distance;
+
+ handle_errors:
+ return 0;
+}
+
+
+double Item_func_sphere_distance::spherical_distance_points(Geometry *g1,
+ Geometry *g2,
+ const double r)
+{
+ double res= 0.0;
+ // Length for the single point (25 Bytes)
+ uint32 len= SRID_SIZE + POINT_DATA_SIZE + WKB_HEADER_SIZE;
+ int error= 0;
+
+ switch (g2->get_class_info()->m_type_id)
+ {
+ case Geometry::wkb_point:
+ // Optimization for point-point case
+ if (g1->get_class_info()->m_type_id == Geometry::wkb_point)
+ {
+ res= static_cast<Gis_point *>(g2)->calculate_haversine(g1, r, &error);
+ }
+ else
+ {
+ // Optimization for single point in Multipoint
+ if (g1->get_data_size() == len)
+ {
+ res= static_cast<Gis_point *>(g2)->calculate_haversine(g1, r, &error);
+ }
+ else
+ {
+ // There are multipoints in g1
+ // g1 is a MultiPoint; calculate its spherical distance from the g2 Point
+ if (g1->get_data_size() != GET_SIZE_ERROR)
+ static_cast<Gis_point *>(g2)->spherical_distance_multipoints(
+ (Gis_multi_point *)g1, r, &res, &error);
+ }
+ }
+ break;
+
+ case Geometry::wkb_multipoint:
+ // Optimization for the Point - MultiPoint case
+ if (g1->get_class_info()->m_type_id == Geometry::wkb_point)
+ {
+ // Optimization for single point in Multipoint g2
+ if (g2->get_data_size() == len)
+ {
+ res= static_cast<Gis_point *>(g1)->calculate_haversine(g2, r, &error);
+ }
+ else
+ {
+ if (g2->get_data_size() != GET_SIZE_ERROR)
+ // g1 is a point (casted to multi_point) and g2 multipoint
+ static_cast<Gis_point *>(g1)->spherical_distance_multipoints(
+ (Gis_multi_point *)g2, r, &res, &error);
+ }
+ }
+ else
+ {
+ // Multipoints in g1 and g2 - no optimization
+ static_cast<Gis_multi_point *>(g1)->spherical_distance_multipoints(
+ (Gis_multi_point *)g2, r, &res, &error);
+ }
+ break;
+
+ default:
+ DBUG_ASSERT(0);
+ break;
+ }
+
+ if (res < 0)
+ goto handle_error;
+
+ handle_error:
+ if (error > 0)
+ my_error(ER_STD_OUT_OF_RANGE_ERROR, MYF(0),
+ "Longitude should be [-180,180]", "ST_Distance_Sphere");
+ else if(error < 0)
+ my_error(ER_STD_OUT_OF_RANGE_ERROR, MYF(0),
+ "Latitude should be [-90,90]", "ST_Distance_Sphere");
+ return res;
+}
+
+
String *Item_func_pointonsurface::val_str(String *str)
{
Gcalc_operation_transporter trn(&func, &collector);
- DBUG_ENTER("Item_func_pointonsurface::val_real");
+ DBUG_ENTER("Item_func_pointonsurface::val_str");
DBUG_ASSERT(fixed == 1);
String *res= args[0]->val_str(&tmp_value);
Geometry_buffer buffer;
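
The point-to-point path above delegates to Gis_point::calculate_haversine(), which is not part of this hunk. Assuming it implements the standard haversine great-circle formula on a sphere of radius r, the computation is roughly as follows (a standalone sketch, not the server implementation):

#include <cmath>
#include <cstdio>

// Great-circle distance between two lon/lat points (in degrees) on a
// sphere of radius r, via the haversine formula. Illustrative only; the
// real work is done by Gis_point::calculate_haversine().
static double haversine_distance(double lon1, double lat1,
                                 double lon2, double lat2, double r)
{
  const double deg2rad= 3.14159265358979323846 / 180.0;
  double dlat= (lat2 - lat1) * deg2rad;
  double dlon= (lon2 - lon1) * deg2rad;
  double a= std::sin(dlat / 2) * std::sin(dlat / 2) +
            std::cos(lat1 * deg2rad) * std::cos(lat2 * deg2rad) *
            std::sin(dlon / 2) * std::sin(dlon / 2);
  return 2.0 * r * std::asin(std::sqrt(a));
}

int main()
{
  // Default radius used by ST_Distance_Sphere above: 6370986 m.
  std::printf("%f\n", haversine_distance(0, 0, 0, 1, 6370986.0));
  return 0;
}
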
diff --git a/sql/item_geofunc.h b/sql/item_geofunc.h
index 4e7cda137c2..245a8353d35 100644
--- a/sql/item_geofunc.h
+++ b/sql/item_geofunc.h
@@ -2,7 +2,7 @@
#define ITEM_GEOFUNC_INCLUDED
/* Copyright (c) 2000, 2016 Oracle and/or its affiliates.
- Copyright (C) 2011, 2016, MariaDB
+ Copyright (C) 2011, 2021, MariaDB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -934,6 +934,20 @@ public:
};
+class Item_func_sphere_distance: public Item_real_func
+{
+ double spherical_distance_points(Geometry *g1, Geometry *g2,
+ const double sphere_r);
+public:
+ Item_func_sphere_distance(THD *thd, List<Item> &list):
+ Item_real_func(thd, list) {}
+ double val_real();
+ const char *func_name() const { return "st_distance_sphere"; }
+ Item *get_copy(THD *thd)
+ { return get_item_copy<Item_func_sphere_distance>(thd, this); }
+};
+
+
class Item_func_pointonsurface: public Item_geometry_func_args_geometry
{
String tmp_value;
diff --git a/sql/item_jsonfunc.cc b/sql/item_jsonfunc.cc
index 0d15c5e9ad0..4b4a94c814e 100644
--- a/sql/item_jsonfunc.cc
+++ b/sql/item_jsonfunc.cc
@@ -618,8 +618,6 @@ String *Item_func_json_unquote::read_json(json_engine_t *je)
json_scan_start(je, js->charset(),(const uchar *) js->ptr(),
(const uchar *) js->ptr() + js->length());
- je->value_type= (enum json_value_types) -1; /* To report errors right. */
-
if (json_read_value(je))
goto error;
@@ -982,7 +980,8 @@ my_decimal *Item_func_json_extract::val_decimal(my_decimal *to)
case JSON_VALUE_ARRAY:
case JSON_VALUE_FALSE:
case JSON_VALUE_NULL:
- break;
+ case JSON_VALUE_UNINITALIZED:
+ break;
};
}
int2my_decimal(E_DEC_FATAL_ERROR, 0, false/*unsigned_flag*/, to);
diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc
index ea14e9d44f8..759b18c4657 100644
--- a/sql/item_strfunc.cc
+++ b/sql/item_strfunc.cc
@@ -1514,17 +1514,18 @@ String *Item_func_insert::val_str(String *str)
null_value=0;
res=args[0]->val_str(str);
res2=args[3]->val_str(&tmp_value);
- start= args[1]->val_int() - 1;
+ start= args[1]->val_int();
length= args[2]->val_int();
if (args[0]->null_value || args[1]->null_value || args[2]->null_value ||
args[3]->null_value)
goto null; /* purecov: inspected */
- if ((start < 0) || (start > res->length()))
+ if ((start <= 0) || (start > res->length()))
return res; // Wrong param; skip insert
if ((length < 0) || (length > res->length()))
length= res->length();
+ start--;
/*
There is one exception not handled (intentionaly) by the character set
@@ -3795,13 +3796,12 @@ String *Item_func_unhex::val_str(String *str)
}
for (end=res->ptr()+res->length(); from < end ; from+=2, to++)
{
- int hex_char;
- *to= (hex_char= hexchar_to_int(from[0])) << 4;
- if ((null_value= (hex_char == -1)))
- return 0;
- *to|= hex_char= hexchar_to_int(from[1]);
- if ((null_value= (hex_char == -1)))
+ int hex_char1, hex_char2;
+ hex_char1= hexchar_to_int(from[0]);
+ hex_char2= hexchar_to_int(from[1]);
+ if ((null_value= (hex_char1 == -1 || hex_char2 == -1)))
return 0;
+ *to= (char) ((hex_char1 << 4) | hex_char2);
}
return str;
}
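
The UNHEX rewrite above reads and validates both hex digits of a byte before storing anything, so an invalid character can no longer leave a partially written output byte behind. A simplified standalone sketch of the same decoding loop (the odd-length padding of the real UNHEX is ignored here):

#include <string>
#include <optional>
#include <cassert>

// Illustrative stand-in for hexchar_to_int(): -1 on invalid input.
static int hexchar_to_int(char c)
{
  if (c >= '0' && c <= '9') return c - '0';
  if (c >= 'a' && c <= 'f') return c - 'a' + 10;
  if (c >= 'A' && c <= 'F') return c - 'A' + 10;
  return -1;
}

// Decode a hex string; both nibbles of each byte are validated before
// the byte is stored, as in the rewritten Item_func_unhex loop.
static std::optional<std::string> unhex(const std::string &in)
{
  if (in.length() % 2)
    return std::nullopt;
  std::string out;
  for (size_t i= 0; i < in.length(); i+= 2)
  {
    int hi= hexchar_to_int(in[i]);
    int lo= hexchar_to_int(in[i + 1]);
    if (hi == -1 || lo == -1)
      return std::nullopt;               // NULL result, nothing half-written
    out.push_back((char) ((hi << 4) | lo));
  }
  return out;
}

int main()
{
  assert(unhex("4D61726961") == std::string("Maria"));
  assert(!unhex("4G").has_value());
  return 0;
}
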
diff --git a/sql/item_strfunc.h b/sql/item_strfunc.h
index c13c49b8363..826a978805e 100644
--- a/sql/item_strfunc.h
+++ b/sql/item_strfunc.h
@@ -42,8 +42,13 @@ protected:
we don't want to free and potentially have to reallocate the buffer
for each call.
*/
- str_value.length(0);
- str_value.set_charset(collation.collation);
+ if (!str_value.is_alloced())
+ str_value.set("", 0, collation.collation); /* Avoid null ptrs */
+ else
+ {
+ str_value.length(0); /* Reuse allocated area */
+ str_value.set_charset(collation.collation);
+ }
return &str_value;
}
public:
diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc
index e2058475d0e..53a6847c52f 100644
--- a/sql/item_subselect.cc
+++ b/sql/item_subselect.cc
@@ -276,7 +276,11 @@ bool Item_subselect::fix_fields(THD *thd_param, Item **ref)
{
if (sl->tvc)
{
- wrap_tvc_into_select(thd, sl);
+ if (!(sl= wrap_tvc_into_select(thd, sl)))
+ {
+ res= TRUE;
+ goto end;
+ }
}
}
@@ -380,7 +384,7 @@ bool Item_subselect::mark_as_eliminated_processor(void *arg)
bool Item_subselect::eliminate_subselect_processor(void *arg)
{
unit->item= NULL;
- unit->exclude_from_tree();
+ unit->exclude();
eliminated= TRUE;
return FALSE;
}
@@ -449,6 +453,26 @@ bool Item_subselect::mark_as_dependent(THD *thd, st_select_lex *select,
/*
+ @brief
+ Update the table bitmaps for the outer references used within a subquery
+*/
+
+bool Item_subselect::update_table_bitmaps_processor(void *arg)
+{
+ List_iterator<Ref_to_outside> it(upper_refs);
+ Ref_to_outside *upper;
+
+ while ((upper= it++))
+ {
+ if (upper->item &&
+ upper->item->walk(&Item::update_table_bitmaps_processor, FALSE, arg))
+ return TRUE;
+ }
+ return FALSE;
+}
+
+
+/*
Adjust attributes after our parent select has been merged into grandparent
DESCRIPTION
@@ -659,6 +683,31 @@ bool Item_subselect::is_expensive()
}
+static
+int walk_items_for_table_list(Item_processor processor,
+ bool walk_subquery, void *argument,
+ List<TABLE_LIST>& join_list)
+{
+ List_iterator<TABLE_LIST> li(join_list);
+ int res;
+ while (TABLE_LIST *table= li++)
+ {
+ if (table->on_expr)
+ {
+ if ((res= table->on_expr->walk(processor, walk_subquery, argument)))
+ return res;
+ }
+ if (table->nested_join)
+ {
+ if ((res= walk_items_for_table_list(processor, walk_subquery, argument,
+ table->nested_join->join_list)))
+ return res;
+ }
+ }
+ return 0;
+}
+
+
bool Item_subselect::walk(Item_processor processor, bool walk_subquery,
void *argument)
{
@@ -690,7 +739,10 @@ bool Item_subselect::walk(Item_processor processor, bool walk_subquery,
if (lex->having && (lex->having)->walk(processor, walk_subquery,
argument))
return 1;
- /* TODO: why does this walk WHERE/HAVING but not ON expressions of outer joins? */
+
+ if (walk_items_for_table_list(processor, walk_subquery, argument,
+ *lex->join_list))
+ return 1;
while ((item=li++))
{
@@ -855,7 +907,7 @@ bool Item_subselect::expr_cache_is_needed(THD *thd)
inline bool Item_in_subselect::left_expr_has_null()
{
- return (*(optimizer->get_cache()))->null_value;
+ return (*(optimizer->get_cache()))->null_value_inside;
}
@@ -1309,7 +1361,17 @@ bool Item_singlerow_subselect::null_inside()
void Item_singlerow_subselect::bring_value()
{
if (!exec() && assigned())
- null_value= 0;
+ {
+ null_value= true;
+ for (uint i= 0; i < max_columns ; i++)
+ {
+ if (!row[i]->null_value)
+ {
+ null_value= false;
+ return;
+ }
+ }
+ }
else
reset();
}
@@ -1335,7 +1397,11 @@ longlong Item_singlerow_subselect::val_int()
{
DBUG_ASSERT(fixed == 1);
if (forced_const)
- return value->val_int();
+ {
+ longlong val= value->val_int();
+ null_value= value->null_value;
+ return val;
+ }
if (!exec() && !value->null_value)
{
null_value= FALSE;
@@ -1344,6 +1410,7 @@ longlong Item_singlerow_subselect::val_int()
else
{
reset();
+ DBUG_ASSERT(null_value);
return 0;
}
}
@@ -1352,7 +1419,11 @@ String *Item_singlerow_subselect::val_str(String *str)
{
DBUG_ASSERT(fixed == 1);
if (forced_const)
- return value->val_str(str);
+ {
+ String *res= value->val_str(str);
+ null_value= value->null_value;
+ return res;
+ }
if (!exec() && !value->null_value)
{
null_value= FALSE;
@@ -1361,6 +1432,7 @@ String *Item_singlerow_subselect::val_str(String *str)
else
{
reset();
+ DBUG_ASSERT(null_value);
return 0;
}
}
@@ -1388,7 +1460,11 @@ my_decimal *Item_singlerow_subselect::val_decimal(my_decimal *decimal_value)
{
DBUG_ASSERT(fixed == 1);
if (forced_const)
- return value->val_decimal(decimal_value);
+ {
+ my_decimal *val= value->val_decimal(decimal_value);
+ null_value= value->null_value;
+ return val;
+ }
if (!exec() && !value->null_value)
{
null_value= FALSE;
@@ -1397,6 +1473,7 @@ my_decimal *Item_singlerow_subselect::val_decimal(my_decimal *decimal_value)
else
{
reset();
+ DBUG_ASSERT(null_value);
return 0;
}
}
@@ -1406,7 +1483,11 @@ bool Item_singlerow_subselect::val_bool()
{
DBUG_ASSERT(fixed == 1);
if (forced_const)
- return value->val_bool();
+ {
+ bool val= value->val_bool();
+ null_value= value->null_value;
+ return val;
+ }
if (!exec() && !value->null_value)
{
null_value= FALSE;
@@ -1415,6 +1496,7 @@ bool Item_singlerow_subselect::val_bool()
else
{
reset();
+ DBUG_ASSERT(null_value);
return 0;
}
}
@@ -1424,7 +1506,11 @@ bool Item_singlerow_subselect::get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t
{
DBUG_ASSERT(fixed == 1);
if (forced_const)
- return value->get_date(thd, ltime, fuzzydate);
+ {
+ bool val= value->get_date(thd, ltime, fuzzydate);
+ null_value= value->null_value;
+ return val;
+ }
if (!exec() && !value->null_value)
{
null_value= FALSE;
@@ -1433,6 +1519,7 @@ bool Item_singlerow_subselect::get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t
else
{
reset();
+ DBUG_ASSERT(null_value);
return 1;
}
}
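
Several forced_const branches above now copy value->null_value back into the wrapping item after evaluation. A generic sketch of that propagation pattern with hypothetical types (not the Item API):

#include <optional>
#include <cassert>

// Hypothetical inner "value" item.
struct Inner
{
  std::optional<long long> v;
  bool null_value= false;
  long long val_int() { null_value= !v.has_value(); return v.value_or(0); }
};

// Wrapper mimicking the forced_const path of Item_singlerow_subselect:
// evaluate the cached value AND propagate its null flag.
struct Wrapper
{
  Inner *value;
  bool null_value= false;
  long long val_int()
  {
    long long val= value->val_int();
    null_value= value->null_value;   // the propagation the patch adds
    return val;
  }
};

int main()
{
  Inner in;                          // NULL inner value
  Wrapper w{&in};
  assert(w.val_int() == 0 && w.null_value);
  return 0;
}
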
diff --git a/sql/item_subselect.h b/sql/item_subselect.h
index 16a4735359b..4816785fa13 100644
--- a/sql/item_subselect.h
+++ b/sql/item_subselect.h
@@ -50,7 +50,11 @@ class Item_subselect :public Item_result_field,
protected Used_tables_and_const_cache,
protected With_sum_func_cache
{
- bool value_assigned; /* value already assigned to subselect */
+ /*
+ Set to TRUE if the value has been assigned for the subselect;
+ FALSE means the subquery was not executed or returned an empty result
+ */
+ bool value_assigned;
bool own_engine; /* the engine was not taken from other Item_subselect */
protected:
/* thread handler, will be assigned in fix_fields only */
@@ -256,6 +260,7 @@ public:
@retval FALSE otherwise
*/
bool is_expensive_processor(void *arg) { return is_expensive(); }
+ bool update_table_bitmaps_processor(void *arg);
/**
Get the SELECT_LEX structure associated with this Item.
@@ -277,7 +282,7 @@ public:
Item* build_clone(THD *thd) { return 0; }
Item* get_copy(THD *thd) { return 0; }
- bool wrap_tvc_into_select(THD *thd, st_select_lex *tvc_sl);
+ st_select_lex *wrap_tvc_into_select(THD *thd, st_select_lex *tvc_sl);
friend class select_result_interceptor;
friend class Item_in_optimizer;
@@ -286,7 +291,8 @@ public:
friend bool Item_ref::fix_fields(THD *, Item **);
friend void mark_select_range_as_dependent(THD*,
st_select_lex*, st_select_lex*,
- Field*, Item*, Item_ident*);
+ Field*, Item*, Item_ident*,
+ bool);
friend bool convert_join_subqueries_to_semijoins(JOIN *join);
};
diff --git a/sql/item_sum.cc b/sql/item_sum.cc
index e00fc2fd3ab..581c94bd191 100644
--- a/sql/item_sum.cc
+++ b/sql/item_sum.cc
@@ -2635,9 +2635,9 @@ bool Item_sum_bit::add_as_window(ulonglong value)
void Item_sum_or::set_bits_from_counters()
{
ulonglong value= 0;
- for (int i= 0; i < NUM_BIT_COUNTERS; i++)
+ for (uint i= 0; i < NUM_BIT_COUNTERS; i++)
{
- value|= bit_counters[i] > 0 ? (1 << i) : 0;
+ value|= bit_counters[i] > 0 ? (1ULL << i) : 0ULL;
}
bits= value | reset_bits;
}
@@ -3619,7 +3619,7 @@ int dump_leaf_key(void* key_arg, element_count count __attribute__((unused)),
{
Item_func_group_concat *item= (Item_func_group_concat *) item_arg;
TABLE *table= item->table;
- uint max_length= (uint)table->in_use->variables.group_concat_max_len;
+ uint max_length= table->in_use->variables.group_concat_max_len;
String tmp((char *)table->record[1], table->s->reclength,
default_charset_info);
String tmp2;
@@ -3738,7 +3738,7 @@ Item_func_group_concat(THD *thd, Name_resolution_context *context_arg,
arg_count_field(select_list->elements),
row_count(0),
distinct(distinct_arg),
- warning_for_row(FALSE),
+ warning_for_row(FALSE), always_null(FALSE),
force_copy_fields(0), row_limit(NULL),
offset_limit(NULL), limit_clause(limit_clause),
copy_offset_limit(0), copy_row_limit(0), original(0)
@@ -3960,7 +3960,7 @@ bool Item_func_group_concat::repack_tree(THD *thd)
DBUG_ASSERT(tree->size_of_element == st.tree.size_of_element);
st.table= table;
st.len= 0;
- st.maxlen= (size_t)thd->variables.group_concat_max_len;
+ st.maxlen= thd->variables.group_concat_max_len;
tree_walk(tree, &copy_to_tree, &st, left_root_right);
if (st.len <= st.maxlen) // Copying aborted. Must be OOM
{
@@ -3981,7 +3981,7 @@ bool Item_func_group_concat::repack_tree(THD *thd)
decreases up to N=10 (that is, factor=1024) and then starts to increase,
again, very slowly.
*/
-#define GCONCAT_REPACK_FACTOR (1 << 10)
+#define GCONCAT_REPACK_FACTOR 10
bool Item_func_group_concat::add()
{
@@ -4027,7 +4027,7 @@ bool Item_func_group_concat::add()
{
THD *thd= table->in_use;
table->field[0]->store(row_str_len, FALSE);
- if (tree_len > thd->variables.group_concat_max_len * GCONCAT_REPACK_FACTOR
+ if ((tree_len >> GCONCAT_REPACK_FACTOR) > thd->variables.group_concat_max_len
&& tree->elements_in_tree > 1)
if (repack_tree(thd))
return 1;
@@ -4082,9 +4082,9 @@ Item_func_group_concat::fix_fields(THD *thd, Item **ref)
result.set_charset(collation.collation);
result_field= 0;
null_value= 1;
- max_length= (uint32)(thd->variables.group_concat_max_len
- / collation.collation->mbminlen
- * collation.collation->mbmaxlen);
+ max_length= (uint32)MY_MIN(thd->variables.group_concat_max_len
+ / collation.collation->mbminlen
+ * collation.collation->mbmaxlen, UINT_MAX32);
uint32 offset;
if (separator->needs_conversion(separator->length(), separator->charset(),
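
GCONCAT_REPACK_FACTOR changes above from a multiplier (1 << 10) to a shift amount (10): comparing (tree_len >> 10) > group_concat_max_len avoids the overflow that tree_len > group_concat_max_len * 1024 could hit now that the casts on group_concat_max_len are removed elsewhere in this hunk. A tiny sketch of the overflow-safe comparison (assumed rationale; types are illustrative):

#include <cstdint>
#include <cassert>

// Compare "tree_len > max_len * 1024" without risking overflow in the
// multiplication: shift the left side down instead of scaling the right
// side up (mirrors the GCONCAT_REPACK_FACTOR change).
static bool needs_repack(uint64_t tree_len, uint64_t group_concat_max_len)
{
  return (tree_len >> 10) > group_concat_max_len;
}

int main()
{
  uint64_t huge= UINT64_MAX;          // near the upper bound of the sysvar
  // Scaling 'huge' by 1024 would overflow; shifting tree_len does not.
  assert(!needs_repack(1024, huge));
  assert(needs_repack(UINT64_MAX, 1ULL << 20));
  return 0;
}
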
diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc
index b476b5f1f27..7dbc81e8825 100644
--- a/sql/item_timefunc.cc
+++ b/sql/item_timefunc.cc
@@ -733,7 +733,10 @@ static bool make_date_time(const LEX_CSTRING &format, MYSQL_TIME *l_time,
For example, '1.1' -> '1.100000'
*/
-static bool get_interval_info(const char *str, size_t length,CHARSET_INFO *cs, size_t count, ulonglong *values,
+#define MAX_DIGITS_IN_TIME_SPEC 20
+
+static bool get_interval_info(const char *str, size_t length,CHARSET_INFO *cs,
+ size_t count, ulonglong *values,
bool transform_msec)
{
const char *end=str+length;
@@ -745,11 +748,21 @@ static bool get_interval_info(const char *str, size_t length,CHARSET_INFO *cs, s
for (i=0 ; i < count ; i++)
{
- longlong value;
+ ulonglong value;
const char *start= str;
- for (value= 0; str != end && my_isdigit(cs, *str); str++)
+ const char *local_end= end;
+
+ /*
+ We limit parsing to 19 digits to avoid an overflow. This is ok as
+ this function is meant to read values only up to microsecond precision
+ */
+ if ((local_end-str) > MAX_DIGITS_IN_TIME_SPEC)
+ local_end= str+ MAX_DIGITS_IN_TIME_SPEC;
+
+ for (value= 0; str != local_end && my_isdigit(cs, *str) ; str++)
value= value*10 + *str - '0';
- if ((field_length= (size_t)(str - start)) >= 20)
+
+ if ((field_length= (size_t)(str - start)) >= MAX_DIGITS_IN_TIME_SPEC)
return true;
values[i]= value;
while (str != end && !my_isdigit(cs,*str))
@@ -2070,9 +2083,9 @@ bool Func_handler_date_add_interval_datetime_arg0_time::
bool Item_date_add_interval::eq(const Item *item, bool binary_cmp) const
{
- Item_date_add_interval *other= (Item_date_add_interval*) item;
if (!Item_func::eq(item, binary_cmp))
return 0;
+ Item_date_add_interval *other= (Item_date_add_interval*) item;
return ((int_type == other->int_type) &&
(date_sub_interval == other->date_sub_interval));
}
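
get_interval_info() now caps each numeric field at MAX_DIGITS_IN_TIME_SPEC characters, so only fields of at most 19 digits are accepted and anything longer is rejected before the accumulated ulonglong is trusted. A standalone sketch of that parsing rule (simplified signature, not the server function):

#include <cstdint>
#include <cstring>
#include <cctype>
#include <cassert>

// Parse one numeric field of an interval spec: read at most 20 digits and
// reject any field with 20 or more, so the accepted value (<= 19 digits)
// always fits in an unsigned 64-bit accumulator.
static bool parse_interval_field(const char *&str, const char *end,
                                 uint64_t *value)
{
  const int MAX_DIGITS_IN_TIME_SPEC= 20;
  const char *start= str;
  const char *local_end= end;
  if (local_end - str > MAX_DIGITS_IN_TIME_SPEC)
    local_end= str + MAX_DIGITS_IN_TIME_SPEC;

  uint64_t v= 0;
  for (; str != local_end && isdigit((unsigned char) *str); str++)
    v= v * 10 + (uint64_t) (*str - '0');

  if (str - start >= MAX_DIGITS_IN_TIME_SPEC)    // 20+ digits: error
    return true;
  *value= v;
  return false;
}

int main()
{
  const char *s= "123:45";
  uint64_t v;
  assert(!parse_interval_field(s, s + std::strlen(s), &v) && v == 123);
  const char *big= "99999999999999999999";       // 20 digits -> rejected
  assert(parse_interval_field(big, big + std::strlen(big), &v));
  return 0;
}
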
diff --git a/sql/key.cc b/sql/key.cc
index adff6975631..6f0a1112497 100644
--- a/sql/key.cc
+++ b/sql/key.cc
@@ -244,14 +244,13 @@ void key_restore(uchar *to_record, const uchar *from_key, KEY *key_info,
else if (key_part->key_part_flag & HA_VAR_LENGTH_PART)
{
Field *field= key_part->field;
- my_bitmap_map *old_map;
my_ptrdiff_t ptrdiff= to_record - field->table->record[0];
field->move_field_offset(ptrdiff);
key_length-= HA_KEY_BLOB_LENGTH;
length= MY_MIN(key_length, key_part->length);
- old_map= dbug_tmp_use_all_columns(field->table, field->table->write_set);
+ MY_BITMAP *old_map= dbug_tmp_use_all_columns(field->table, &field->table->write_set);
field->set_key_image(from_key, length);
- dbug_tmp_restore_column_map(field->table->write_set, old_map);
+ dbug_tmp_restore_column_map(&field->table->write_set, old_map);
from_key+= HA_KEY_BLOB_LENGTH;
field->move_field_offset(-ptrdiff);
}
@@ -419,7 +418,7 @@ void field_unpack(String *to, Field *field, const uchar *rec, uint max_length,
void key_unpack(String *to, TABLE *table, KEY *key)
{
- my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
+ MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->read_set);
DBUG_ENTER("key_unpack");
to->length(0);
@@ -443,7 +442,7 @@ void key_unpack(String *to, TABLE *table, KEY *key)
field_unpack(to, key_part->field, table->record[0], key_part->length,
MY_TEST(key_part->key_part_flag & HA_PART_KEY_SEG));
}
- dbug_tmp_restore_column_map(table->read_set, old_map);
+ dbug_tmp_restore_column_map(&table->read_set, old_map);
DBUG_VOID_RETURN;
}
diff --git a/sql/lock.cc b/sql/lock.cc
index a3744d7f000..3a2001fbc34 100644
--- a/sql/lock.cc
+++ b/sql/lock.cc
@@ -729,6 +729,9 @@ static int unlock_external(THD *thd, TABLE **table,uint count)
- GET_LOCK_STORE_LOCKS : Store lock info in TABLE
- GET_LOCK_SKIP_SEQUENCES : Ignore sequences (for temporary unlock)
- GET_LOCK_ON_THD : Store lock in thd->mem_root
+
+ Temporary tables are not locked (as these are single user), except for
+ TRANSACTIONAL_TMP_TABLES as locking is needed to handle transactions.
*/
MYSQL_LOCK *get_lock_data(THD *thd, TABLE **table_ptr, uint count, uint flags)
@@ -745,8 +748,8 @@ MYSQL_LOCK *get_lock_data(THD *thd, TABLE **table_ptr, uint count, uint flags)
{
TABLE *t= table_ptr[i];
- if (t->s->tmp_table != NON_TRANSACTIONAL_TMP_TABLE &&
- t->s->tmp_table != INTERNAL_TMP_TABLE &&
+ if ((likely(!t->s->tmp_table) ||
+ (t->s->tmp_table == TRANSACTIONAL_TMP_TABLE)) &&
(!(flags & GET_LOCK_SKIP_SEQUENCES) || t->s->sequence == 0))
{
lock_count+= t->file->lock_count();
@@ -774,13 +777,13 @@ MYSQL_LOCK *get_lock_data(THD *thd, TABLE **table_ptr, uint count, uint flags)
for (i=0 ; i < count ; i++)
{
- TABLE *table;
+ TABLE *table= table_ptr[i];
enum thr_lock_type lock_type;
THR_LOCK_DATA **locks_start;
- table= table_ptr[i];
- if (table->s->tmp_table == NON_TRANSACTIONAL_TMP_TABLE ||
- table->s->tmp_table == INTERNAL_TMP_TABLE ||
- ((flags & GET_LOCK_SKIP_SEQUENCES) && table->s->sequence))
+
+ if (!((likely(!table->s->tmp_table) ||
+ (table->s->tmp_table == TRANSACTIONAL_TMP_TABLE)) &&
+ (!(flags & GET_LOCK_SKIP_SEQUENCES) || table->s->sequence == 0)))
continue;
lock_type= table->reginfo.lock_type;
DBUG_ASSERT(lock_type != TL_WRITE_DEFAULT && lock_type != TL_READ_DEFAULT);
diff --git a/sql/log.cc b/sql/log.cc
index 480e3b696cc..49a319eb29d 100644
--- a/sql/log.cc
+++ b/sql/log.cc
@@ -6390,8 +6390,9 @@ bool MYSQL_BIN_LOG::write(Log_event *event_info, my_bool *with_annotate)
DBUG_ASSERT(!thd->backup_commit_lock);
mdl_request.init(MDL_key::BACKUP, "", "", MDL_BACKUP_COMMIT, MDL_EXPLICIT);
- thd->mdl_context.acquire_lock(&mdl_request,
- thd->variables.lock_wait_timeout);
+ if (thd->mdl_context.acquire_lock(&mdl_request,
+ thd->variables.lock_wait_timeout))
+ DBUG_RETURN(1);
thd->backup_commit_lock= &mdl_request;
if ((res= thd->wait_for_prior_commit()))
@@ -6983,6 +6984,9 @@ int MYSQL_BIN_LOG::rotate_and_purge(bool force_rotate,
bool check_purge= false;
mysql_mutex_lock(&LOCK_log);
+
+ DEBUG_SYNC(current_thd, "rotate_after_acquire_LOCK_log");
+
prev_binlog_id= current_binlog_id;
if ((err_gtid= do_delete_gtid_domain(domain_drop_lex)))
@@ -6993,11 +6997,22 @@ int MYSQL_BIN_LOG::rotate_and_purge(bool force_rotate,
}
else if (unlikely((error= rotate(force_rotate, &check_purge))))
check_purge= false;
+
+ DEBUG_SYNC(current_thd, "rotate_after_rotate");
+
/*
NOTE: Run purge_logs wo/ holding LOCK_log because it does not need
the mutex. Otherwise causes various deadlocks.
+    Explicit binlog rotation must be synchronized with a concurrent
+    binlog ordered commit; in particular, the binlog checkpoint
+    notification must not be requested until earlier binlogged
+    concurrent commits have completed.
*/
+ mysql_mutex_lock(&LOCK_after_binlog_sync);
mysql_mutex_unlock(&LOCK_log);
+ mysql_mutex_lock(&LOCK_commit_ordered);
+ mysql_mutex_unlock(&LOCK_after_binlog_sync);
+ mysql_mutex_unlock(&LOCK_commit_ordered);
if (check_purge)
checkpoint_and_purge(prev_binlog_id);
@@ -7470,6 +7485,8 @@ MYSQL_BIN_LOG::write_transaction_to_binlog(THD *thd,
new transaction directly to participate in the group commit.
@retval < 0 Error
+ @retval -2 WSREP error with commit ordering
+ @retval -3 WSREP return code to mark the leader
@retval > 0 If queued as the first entry in the queue (meaning this
is the leader)
@retval 0 Otherwise (queued as participant, leader handles the commit)
@@ -7767,6 +7784,22 @@ MYSQL_BIN_LOG::queue_for_group_commit(group_commit_entry *orig_entry)
cur= entry->thd->wait_for_commit_ptr;
}
+#ifdef WITH_WSREP
+ if (wsrep_is_active(entry->thd) &&
+ wsrep_run_commit_hook(entry->thd, entry->all))
+ {
+ /* Release commit order here */
+ if (wsrep_ordered_commit(entry->thd, entry->all, wsrep_apply_error()))
+ result= -2;
+
+ /* return -3, if this is leader */
+ if (orig_queue == NULL)
+ result= -3;
+ }
+ else
+ DBUG_ASSERT(result != -2 && result != -3);
+#endif /* WITH_WSREP */
+
if (opt_binlog_commit_wait_count > 0 && orig_queue != NULL)
mysql_cond_signal(&COND_prepare_ordered);
mysql_mutex_unlock(&LOCK_prepare_ordered);
@@ -7788,25 +7821,32 @@ MYSQL_BIN_LOG::write_transaction_to_binlog_events(group_commit_entry *entry)
{
int is_leader= queue_for_group_commit(entry);
#ifdef WITH_WSREP
- if (wsrep_is_active(entry->thd) &&
- wsrep_run_commit_hook(entry->thd, entry->all))
- {
- /*
- Release commit order and if leader, wait for prior commit to
- complete. This establishes total order for group leaders.
- */
- if (wsrep_ordered_commit(entry->thd, entry->all, wsrep_apply_error()))
- {
- entry->thd->wakeup_subsequent_commits(1);
- return 1;
- }
- if (is_leader)
- {
- if (entry->thd->wait_for_prior_commit())
- return 1;
- }
+ /* Commit order was released in the queue_for_group_commit() call;
+ here we check if wsrep_ordered_commit() failed or if we are the leader */
+ switch (is_leader)
+ {
+ case -2: /* wsrep_ordered_commit() has failed */
+ DBUG_ASSERT(wsrep_is_active(entry->thd));
+ DBUG_ASSERT(wsrep_run_commit_hook(entry->thd, entry->all));
+ entry->thd->wakeup_subsequent_commits(1);
+ return true;
+ case -3: /* this is leader, wait for prior commit to
+ complete. This establishes total order for group leaders
+ */
+ DBUG_ASSERT(wsrep_is_active(entry->thd));
+ DBUG_ASSERT(wsrep_run_commit_hook(entry->thd, entry->all));
+ if (entry->thd->wait_for_prior_commit())
+ return true;
+
+ /* retain the correct is_leader value */
+ is_leader= 1;
+ break;
+
+ default: /* native MariaDB cases */
+ break;
}
#endif /* WITH_WSREP */
+
/*
The first in the queue handles group commit for all; the others just wait
to be signalled when group commit is done.
@@ -8173,7 +8213,12 @@ MYSQL_BIN_LOG::trx_group_commit_leader(group_commit_entry *leader)
}
DEBUG_SYNC(leader->thd, "commit_before_get_LOCK_commit_ordered");
+
mysql_mutex_lock(&LOCK_commit_ordered);
+ DBUG_EXECUTE_IF("crash_before_engine_commit",
+ {
+ DBUG_SUICIDE();
+ });
last_commit_pos_offset= commit_offset;
/*
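
The rotate_and_purge() hunk above adds a lock hand-off: LOCK_after_binlog_sync is taken before LOCK_log is released, then LOCK_commit_ordered is passed through, so the rotation waits out ordered commits that are already past the sync stage before checkpointing. A sketch of that hand-off sequence with plain std::mutex (an illustration of the ordering only, not the server's locking code):

#include <mutex>

// Lock hand-off barrier: by passing through LOCK_after_binlog_sync and
// LOCK_commit_ordered in this order, the rotating thread drains any group
// commit that already went past the sync stage before it requests the
// binlog checkpoint for the previous binlog.
struct BinlogLocks
{
  std::mutex LOCK_log;
  std::mutex LOCK_after_binlog_sync;
  std::mutex LOCK_commit_ordered;
};

void rotate_barrier(BinlogLocks &b)
{
  b.LOCK_log.lock();
  // ... rotate the binlog under LOCK_log ...
  b.LOCK_after_binlog_sync.lock();  // no new commit may enter the ordered stage
  b.LOCK_log.unlock();
  b.LOCK_commit_ordered.lock();     // waits for in-flight ordered commits
  b.LOCK_after_binlog_sync.unlock();
  b.LOCK_commit_ordered.unlock();
  // safe to run checkpoint_and_purge() for the previous binlog now
}

int main()
{
  BinlogLocks b;
  rotate_barrier(b);
  return 0;
}
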
diff --git a/sql/log_event.cc b/sql/log_event.cc
index 6871eeda79e..337de3508ed 100644
--- a/sql/log_event.cc
+++ b/sql/log_event.cc
@@ -8983,8 +8983,20 @@ err:
}
#endif /* MYSQL_CLIENT */
-
#if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT)
+static bool wsrep_must_replay(THD *thd)
+{
+#ifdef WITH_WSREP
+ mysql_mutex_lock(&thd->LOCK_thd_data);
+ bool res= WSREP(thd) && thd->wsrep_trx().state() == wsrep::transaction::s_must_replay;
+ mysql_mutex_unlock(&thd->LOCK_thd_data);
+ return res;
+#else
+ return false;
+#endif
+}
+
+
int Xid_log_event::do_apply_event(rpl_group_info *rgi)
{
bool res;
@@ -9049,16 +9061,8 @@ int Xid_log_event::do_apply_event(rpl_group_info *rgi)
res= trans_commit(thd); /* Automatically rolls back on error. */
thd->release_transactional_locks();
-#ifdef WITH_WSREP
- if (WSREP(thd)) mysql_mutex_lock(&thd->LOCK_thd_data);
- if ((!res || (WSREP(thd) && thd->wsrep_trx().state() == wsrep::transaction::s_must_replay )) && sub_id)
-#else
- if (likely(!res) && sub_id)
-#endif /* WITH_WSREP */
+ if (sub_id && (!res || wsrep_must_replay(thd)))
rpl_global_gtid_slave_state->update_state_hash(sub_id, &gtid, hton, rgi);
-#ifdef WITH_WSREP
- if (WSREP(thd)) mysql_mutex_unlock(&thd->LOCK_thd_data);
-#endif /* WITH_WSREP */
/*
Increment the global status commit count variable
*/
@@ -11267,7 +11271,7 @@ int Rows_log_event::do_add_row_data(uchar *row_data, size_t length)
There was the same problem with MERGE MYISAM tables and so here we try to
go the same way.
*/
-static void restore_empty_query_table_list(LEX *lex)
+inline void restore_empty_query_table_list(LEX *lex)
{
if (lex->first_not_own_table())
(*lex->first_not_own_table()->prev_global)= NULL;
@@ -11282,6 +11286,8 @@ int Rows_log_event::do_apply_event(rpl_group_info *rgi)
TABLE* table;
DBUG_ENTER("Rows_log_event::do_apply_event(Relay_log_info*)");
int error= 0;
+ LEX *lex= thd->lex;
+ uint8 new_trg_event_map= get_trg_event_map();
/*
If m_table_id == ~0ULL, then we have a dummy event that does not
contain any data. In that case, we just remove all tables in the
@@ -11372,27 +11378,29 @@ int Rows_log_event::do_apply_event(rpl_group_info *rgi)
DBUG_ASSERT(!debug_sync_set_action(thd, STRING_WITH_LEN(action)));
};);
- if (slave_run_triggers_for_rbr)
- {
- LEX *lex= thd->lex;
- uint8 new_trg_event_map= get_trg_event_map();
-
- /*
- Trigger's procedures work with global table list. So we have to add
- rgi->tables_to_lock content there to get trigger's in the list.
+ /*
+ Trigger procedures work with the global table list, so we have to add
+ the rgi->tables_to_lock content there to get the triggers into the list.
- Then restore_empty_query_table_list() restore the list as it was
- */
- DBUG_ASSERT(lex->query_tables == NULL);
- if ((lex->query_tables= rgi->tables_to_lock))
- rgi->tables_to_lock->prev_global= &lex->query_tables;
+ Then restore_empty_query_table_list() restores the list as it was.
+ */
+ DBUG_ASSERT(lex->query_tables == NULL);
+ if ((lex->query_tables= rgi->tables_to_lock))
+ rgi->tables_to_lock->prev_global= &lex->query_tables;
- for (TABLE_LIST *tables= rgi->tables_to_lock; tables;
- tables= tables->next_global)
+ for (TABLE_LIST *tables= rgi->tables_to_lock; tables;
+ tables= tables->next_global)
+ {
+ if (slave_run_triggers_for_rbr)
{
tables->trg_event_map= new_trg_event_map;
lex->query_tables_last= &tables->next_global;
}
+ else
+ {
+ tables->slave_fk_event_map= new_trg_event_map;
+ lex->query_tables_last= &tables->next_global;
+ }
}
if (unlikely(open_and_lock_tables(thd, rgi->tables_to_lock, FALSE, 0)))
{
@@ -11749,8 +11757,7 @@ int Rows_log_event::do_apply_event(rpl_group_info *rgi)
}
/* remove trigger's tables */
- if (slave_run_triggers_for_rbr)
- restore_empty_query_table_list(thd->lex);
+ restore_empty_query_table_list(thd->lex);
#if defined(WITH_WSREP) && defined(HAVE_QUERY_CACHE)
if (WSREP(thd) && wsrep_thd_is_applying(thd))
@@ -11769,8 +11776,7 @@ int Rows_log_event::do_apply_event(rpl_group_info *rgi)
DBUG_RETURN(error);
err:
- if (slave_run_triggers_for_rbr)
- restore_empty_query_table_list(thd->lex);
+ restore_empty_query_table_list(thd->lex);
rgi->slave_close_thread_tables(thd);
DBUG_RETURN(error);
}
@@ -13739,11 +13745,11 @@ int Rows_log_event::update_sequence()
/* This event come from a setval function executed on the master.
Update the sequence next_number and round, like we do with setval()
*/
- my_bitmap_map *old_map= dbug_tmp_use_all_columns(table,
- table->read_set);
+ MY_BITMAP *old_map= dbug_tmp_use_all_columns(table,
+ &table->read_set);
longlong nextval= table->field[NEXT_FIELD_NO]->val_int();
longlong round= table->field[ROUND_FIELD_NO]->val_int();
- dbug_tmp_restore_column_map(table->read_set, old_map);
+ dbug_tmp_restore_column_map(&table->read_set, old_map);
return table->s->sequence->set_value(table, nextval, round, 0) > 0;
}
diff --git a/sql/mdl.cc b/sql/mdl.cc
index 93b7982c4a5..5e54178db70 100644
--- a/sql/mdl.cc
+++ b/sql/mdl.cc
@@ -1138,7 +1138,7 @@ MDL_wait::timed_wait(MDL_context_owner *owner, struct timespec *abs_timeout,
DBUG_ASSERT(!debug_sync_set_action((owner->get_thd()),
STRING_WITH_LEN(act)));
};);
- if (wsrep_thd_is_BF(owner->get_thd(), false))
+ if (WSREP_ON && wsrep_thd_is_BF(owner->get_thd(), false))
{
wait_result= mysql_cond_wait(&m_COND_wait_status, &m_LOCK_wait_status);
}
@@ -1211,7 +1211,7 @@ void MDL_lock::Ticket_list::add_ticket(MDL_ticket *ticket)
*/
DBUG_ASSERT(ticket->get_lock());
#ifdef WITH_WSREP
- if ((this == &(ticket->get_lock()->m_waiting)) &&
+ if (WSREP_ON && (this == &(ticket->get_lock()->m_waiting)) &&
wsrep_thd_is_BF(ticket->get_ctx()->get_thd(), false))
{
Ticket_iterator itw(ticket->get_lock()->m_waiting);
@@ -2782,6 +2782,7 @@ void MDL_context::find_deadlock()
context was waiting is concurrently satisfied.
*/
(void) victim->m_wait.set_status(MDL_wait::VICTIM);
+ victim->inc_deadlock_overweight();
victim->unlock_deadlock_victim();
if (victim == this)
diff --git a/sql/mdl.h b/sql/mdl.h
index 9a788b0ea31..a2cb7c2aa85 100644
--- a/sql/mdl.h
+++ b/sql/mdl.h
@@ -909,7 +909,8 @@ public:
/** @pre Only valid if we started waiting for lock. */
inline uint get_deadlock_weight() const
- { return m_waiting_for->get_deadlock_weight(); }
+ { return m_waiting_for->get_deadlock_weight() + m_deadlock_overweight; }
+ void inc_deadlock_overweight() { m_deadlock_overweight++; }
/**
Post signal to the context (and wake it up if necessary).
@@ -1027,6 +1028,7 @@ private:
*/
MDL_wait_for_subgraph *m_waiting_for;
LF_PINS *m_pins;
+ uint m_deadlock_overweight= 0;
private:
MDL_ticket *find_ticket(MDL_request *mdl_req,
enum_mdl_duration *duration);
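
The inc_deadlock_overweight() call added in mdl.cc bumps the victim's weight each time it is chosen, so repeated deadlocks among the same contexts tend to rotate the victim instead of always aborting the same one. A simplified model of that idea (the victim-selection rule here is assumed for illustration, not taken from the MDL code):

#include <algorithm>
#include <cassert>
#include <vector>

// Simplified deadlock-victim selection with an "overweight" penalty.
struct Ctx
{
  unsigned base_weight;
  unsigned overweight= 0;
  unsigned weight() const { return base_weight + overweight; }
};

Ctx *pick_victim(std::vector<Ctx *> &cycle)
{
  // Assume the victim is the participant with the smallest total weight.
  Ctx *victim= *std::min_element(cycle.begin(), cycle.end(),
      [](const Ctx *a, const Ctx *b) { return a->weight() < b->weight(); });
  victim->overweight++;                       // inc_deadlock_overweight()
  return victim;
}

int main()
{
  Ctx a{1}, b{1};
  std::vector<Ctx *> cycle{&a, &b};
  Ctx *first= pick_victim(cycle);
  Ctx *second= pick_victim(cycle);
  assert(first != second);                    // the victim rotates
  return 0;
}
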
diff --git a/sql/mf_iocache.cc b/sql/mf_iocache.cc
index c9cff6ad930..877a49edbec 100644
--- a/sql/mf_iocache.cc
+++ b/sql/mf_iocache.cc
@@ -26,7 +26,7 @@
Used instead of FILE when reading or writing whole files.
This will make mf_rec_cache obsolete.
One can change info->pos_in_file to a higher value to skip bytes in file if
- also info->rc_pos is set to info->rc_end.
+ also info->read_pos is set to info->read_end.
If called through open_cached_file(), then the temporary file will
only be created if a write exeeds the file buffer or if one calls
flush_io_cache().
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index a74fb4326e4..7e3ce878cdc 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -363,7 +363,6 @@ static bool binlog_format_used= false;
LEX_STRING opt_init_connect, opt_init_slave;
mysql_cond_t COND_thread_cache;
static mysql_cond_t COND_flush_thread_cache;
-mysql_cond_t COND_slave_background;
static DYNAMIC_ARRAY all_options;
static longlong start_memory_used;
@@ -701,7 +700,7 @@ mysql_mutex_t
LOCK_crypt,
LOCK_global_system_variables,
LOCK_user_conn,
- LOCK_connection_count, LOCK_error_messages, LOCK_slave_background;
+ LOCK_connection_count, LOCK_error_messages;
mysql_mutex_t LOCK_stats, LOCK_global_user_client_stats,
LOCK_global_table_stats, LOCK_global_index_stats;
@@ -893,8 +892,7 @@ PSI_mutex_key key_LOCK_stats,
PSI_mutex_key key_LOCK_gtid_waiting;
PSI_mutex_key key_LOCK_after_binlog_sync;
-PSI_mutex_key key_LOCK_prepare_ordered, key_LOCK_commit_ordered,
- key_LOCK_slave_background;
+PSI_mutex_key key_LOCK_prepare_ordered, key_LOCK_commit_ordered;
PSI_mutex_key key_TABLE_SHARE_LOCK_share;
PSI_mutex_key key_LOCK_ack_receiver;
@@ -968,7 +966,6 @@ static PSI_mutex_info all_server_mutexes[]=
{ &key_LOCK_prepare_ordered, "LOCK_prepare_ordered", PSI_FLAG_GLOBAL},
{ &key_LOCK_after_binlog_sync, "LOCK_after_binlog_sync", PSI_FLAG_GLOBAL},
{ &key_LOCK_commit_ordered, "LOCK_commit_ordered", PSI_FLAG_GLOBAL},
- { &key_LOCK_slave_background, "LOCK_slave_background", PSI_FLAG_GLOBAL},
{ &key_LOCK_thread_cache, "LOCK_thread_cache", PSI_FLAG_GLOBAL},
{ &key_PARTITION_LOCK_auto_inc, "HA_DATA_PARTITION::LOCK_auto_inc", 0},
{ &key_LOCK_slave_state, "LOCK_slave_state", 0},
@@ -1039,7 +1036,7 @@ PSI_cond_key key_TC_LOG_MMAP_COND_queue_busy;
PSI_cond_key key_COND_rpl_thread_queue, key_COND_rpl_thread,
key_COND_rpl_thread_stop, key_COND_rpl_thread_pool,
key_COND_parallel_entry, key_COND_group_commit_orderer,
- key_COND_prepare_ordered, key_COND_slave_background;
+ key_COND_prepare_ordered;
PSI_cond_key key_COND_wait_gtid, key_COND_gtid_ignore_duplicates;
PSI_cond_key key_COND_ack_receiver;
@@ -1087,7 +1084,6 @@ static PSI_cond_info all_server_conds[]=
{ &key_COND_parallel_entry, "COND_parallel_entry", 0},
{ &key_COND_group_commit_orderer, "COND_group_commit_orderer", 0},
{ &key_COND_prepare_ordered, "COND_prepare_ordered", 0},
- { &key_COND_slave_background, "COND_slave_background", 0},
{ &key_COND_start_thread, "COND_start_thread", PSI_FLAG_GLOBAL},
{ &key_COND_wait_gtid, "COND_wait_gtid", 0},
{ &key_COND_gtid_ignore_duplicates, "COND_gtid_ignore_duplicates", 0},
@@ -1135,14 +1131,6 @@ PSI_file_key key_file_binlog_state;
PSI_statement_info stmt_info_new_packet;
#endif
-#ifdef WITH_WSREP
-/** Whether the Galera write-set replication is enabled. A cached copy of
-global_system_variables.wsrep_on && wsrep_provider &&
- strcmp(wsrep_provider, WSREP_NONE)
-*/
-bool WSREP_ON_;
-#endif /* WITH_WSREP */
-
#ifndef EMBEDDED_LIBRARY
void net_before_header_psi(struct st_net *net, void *thd, size_t /* unused: count */)
{
@@ -1525,31 +1513,9 @@ static void end_ssl();
/* common callee of two shutdown phases */
static void kill_thread(THD *thd)
{
- if (WSREP(thd)) mysql_mutex_lock(&thd->LOCK_thd_data);
mysql_mutex_lock(&thd->LOCK_thd_kill);
- if (thd->mysys_var)
- {
- thd->mysys_var->abort= 1;
- mysql_mutex_lock(&thd->mysys_var->mutex);
- if (thd->mysys_var->current_cond)
- {
- for (uint i= 0; i < 2; i++)
- {
- int ret= mysql_mutex_trylock(thd->mysys_var->current_mutex);
- mysql_cond_broadcast(thd->mysys_var->current_cond);
- if (!ret)
- {
- /* Thread has surely got the signal, unlock and abort */
- mysql_mutex_unlock(thd->mysys_var->current_mutex);
- break;
- }
- sleep(1);
- }
- }
- mysql_mutex_unlock(&thd->mysys_var->mutex);
- }
+ thd->abort_current_cond_wait(true);
mysql_mutex_unlock(&thd->LOCK_thd_kill);
- if (WSREP(thd)) mysql_mutex_unlock(&thd->LOCK_thd_data);
}
@@ -1905,6 +1871,7 @@ extern "C" void unireg_abort(int exit_code)
wsrep_deinit(true);
wsrep_deinit_server();
}
+ wsrep_sst_auth_free();
#endif // WITH_WSREP
clean_up(!opt_abort && (exit_code || !opt_bootstrap)); /* purecov: inspected */
@@ -2140,8 +2107,6 @@ static void clean_up_mutexes()
mysql_cond_destroy(&COND_prepare_ordered);
mysql_mutex_destroy(&LOCK_after_binlog_sync);
mysql_mutex_destroy(&LOCK_commit_ordered);
- mysql_mutex_destroy(&LOCK_slave_background);
- mysql_cond_destroy(&COND_slave_background);
#ifndef EMBEDDED_LIBRARY
mysql_mutex_destroy(&LOCK_error_log);
#endif
@@ -3288,7 +3253,13 @@ pthread_handler_t signal_hand(void *arg __attribute__((unused)))
}
break;
case SIGHUP:
+#if defined(SI_KERNEL)
if (!abort_loop && origin != SI_KERNEL)
+#elif defined(SI_USER)
+ if (!abort_loop && origin <= SI_USER)
+#else
+ if (!abort_loop)
+#endif
{
int not_used;
mysql_print_status(); // Print some debug info
@@ -4555,9 +4526,6 @@ static int init_thread_environment()
MY_MUTEX_INIT_SLOW);
mysql_mutex_init(key_LOCK_commit_ordered, &LOCK_commit_ordered,
MY_MUTEX_INIT_SLOW);
- mysql_mutex_init(key_LOCK_slave_background, &LOCK_slave_background,
- MY_MUTEX_INIT_SLOW);
- mysql_cond_init(key_COND_slave_background, &COND_slave_background, NULL);
#ifdef HAVE_OPENSSL
mysql_mutex_init(key_LOCK_des_key_file,
@@ -5255,6 +5223,10 @@ static int init_server_components()
that there are unprocessed options.
*/
my_getopt_skip_unknown= 0;
+#ifdef WITH_WSREP
+ if (wsrep_recovery)
+ my_getopt_skip_unknown= TRUE;
+#endif
if ((ho_error= handle_options(&remaining_argc, &remaining_argv, no_opts,
mysqld_get_one_option)))
@@ -5264,20 +5236,27 @@ static int init_server_components()
remaining_argv--;
my_getopt_skip_unknown= TRUE;
- if (remaining_argc > 1)
+#ifdef WITH_WSREP
+ if (!wsrep_recovery)
{
- fprintf(stderr, "%s: Too many arguments (first extra is '%s').\n",
- my_progname, remaining_argv[1]);
- unireg_abort(1);
+#endif
+ if (remaining_argc > 1)
+ {
+ fprintf(stderr, "%s: Too many arguments (first extra is '%s').\n",
+ my_progname, remaining_argv[1]);
+ unireg_abort(1);
+ }
+#ifdef WITH_WSREP
}
+#endif
}
- if (init_io_cache_encryption())
- unireg_abort(1);
-
if (opt_abort)
unireg_abort(0);
+ if (init_io_cache_encryption())
+ unireg_abort(1);
+
/* if the errmsg.sys is not loaded, terminate to maintain behaviour */
if (!DEFAULT_ERRMSGS[0][0])
unireg_abort(1);
@@ -5729,10 +5708,7 @@ int mysqld_main(int argc, char **argv)
}
#ifdef WITH_WSREP
- WSREP_ON_= (global_system_variables.wsrep_on &&
- wsrep_provider &&
- strcmp(wsrep_provider, WSREP_NONE));
-
+ wsrep_set_wsrep_on();
if (WSREP_ON && wsrep_check_opts()) unireg_abort(1);
#endif
@@ -5823,9 +5799,12 @@ int mysqld_main(int argc, char **argv)
wsrep_init_startup (false);
}
- WSREP_DEBUG("Startup creating %ld applier threads running %lu",
- wsrep_slave_threads - 1, wsrep_running_applier_threads);
- wsrep_create_appliers(wsrep_slave_threads - 1);
+ if (wsrep_cluster_address_exists())
+ {
+ WSREP_DEBUG("Startup creating %ld applier threads running %lu",
+ wsrep_slave_threads - 1, wsrep_running_applier_threads);
+ wsrep_create_appliers(wsrep_slave_threads - 1);
+ }
}
}
@@ -7574,8 +7553,11 @@ static int show_memory_used(THD *thd, SHOW_VAR *var, char *buff,
var->type= SHOW_LONGLONG;
var->value= buff;
if (scope == OPT_GLOBAL)
+ {
+ calc_sum_of_all_status_if_needed(status_var);
*(longlong*) buff= (status_var->global_memory_used +
status_var->local_memory_used);
+ }
else
*(longlong*) buff= status_var->local_memory_used;
return 0;
diff --git a/sql/mysqld.h b/sql/mysqld.h
index bd45ff7b798..f4d0d891a0f 100644
--- a/sql/mysqld.h
+++ b/sql/mysqld.h
@@ -615,8 +615,7 @@ extern mysql_mutex_t
LOCK_error_log, LOCK_delayed_insert, LOCK_short_uuid_generator,
LOCK_delayed_status, LOCK_delayed_create, LOCK_crypt, LOCK_timezone,
LOCK_active_mi, LOCK_manager, LOCK_user_conn,
- LOCK_prepared_stmt_count, LOCK_error_messages, LOCK_connection_count,
- LOCK_slave_background;
+ LOCK_prepared_stmt_count, LOCK_error_messages, LOCK_connection_count ;
extern MYSQL_PLUGIN_IMPORT mysql_mutex_t LOCK_global_system_variables;
extern mysql_rwlock_t LOCK_all_status_vars;
extern mysql_mutex_t LOCK_start_thread;
@@ -631,7 +630,6 @@ extern mysql_rwlock_t LOCK_ssl_refresh;
extern mysql_prlock_t LOCK_system_variables_hash;
extern mysql_cond_t COND_start_thread;
extern mysql_cond_t COND_manager;
-extern mysql_cond_t COND_slave_background;
extern Atomic_counter<uint32_t> thread_count;
extern char *opt_ssl_ca, *opt_ssl_capath, *opt_ssl_cert, *opt_ssl_cipher,
diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index 1a7ac1044c3..d47aa1ee41e 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -449,6 +449,7 @@ void print_range_for_non_indexed_field(String *out, Field *field,
static void print_min_range_operator(String *out, const ha_rkey_function flag);
static void print_max_range_operator(String *out, const ha_rkey_function flag);
+static bool is_field_an_unique_index(RANGE_OPT_PARAM *param, Field *field);
/*
SEL_IMERGE is a list of possible ways to do index merge, i.e. it is
@@ -3636,8 +3637,7 @@ bool calculate_cond_selectivity_for_table(THD *thd, TABLE *table, Item **cond)
void store_key_image_to_rec(Field *field, uchar *ptr, uint len)
{
- /* Do the same as print_key() does */
- my_bitmap_map *old_map;
+ /* Do the same as print_key() does */
if (field->real_maybe_null())
{
@@ -3649,10 +3649,10 @@ void store_key_image_to_rec(Field *field, uchar *ptr, uint len)
field->set_notnull();
ptr++;
}
- old_map= dbug_tmp_use_all_columns(field->table,
- field->table->write_set);
+ MY_BITMAP *old_map= dbug_tmp_use_all_columns(field->table,
+ &field->table->write_set);
field->set_key_image(ptr, len);
- dbug_tmp_restore_column_map(field->table->write_set, old_map);
+ dbug_tmp_restore_column_map(&field->table->write_set, old_map);
}
#ifdef WITH_PARTITION_STORAGE_ENGINE
@@ -3867,7 +3867,7 @@ bool prune_partitions(THD *thd, TABLE *table, Item *pprune_cond)
PART_PRUNE_PARAM prune_param;
MEM_ROOT alloc;
RANGE_OPT_PARAM *range_par= &prune_param.range_param;
- my_bitmap_map *old_sets[2];
+ MY_BITMAP *old_sets[2];
prune_param.part_info= part_info;
init_sql_alloc(&alloc, "prune_partitions",
@@ -3884,7 +3884,7 @@ bool prune_partitions(THD *thd, TABLE *table, Item *pprune_cond)
}
dbug_tmp_use_all_columns(table, old_sets,
- table->read_set, table->write_set);
+ &table->read_set, &table->write_set);
range_par->thd= thd;
range_par->table= table;
/* range_par->cond doesn't need initialization */
@@ -3981,7 +3981,7 @@ all_used:
retval= FALSE; // some partitions are used
mark_all_partitions_as_used(prune_param.part_info);
end:
- dbug_tmp_restore_column_maps(table->read_set, table->write_set, old_sets);
+ dbug_tmp_restore_column_maps(&table->read_set, &table->write_set, old_sets);
thd->no_errors=0;
thd->mem_root= range_par->old_root;
free_root(&alloc,MYF(0)); // Return memory & allocator
@@ -7690,6 +7690,21 @@ SEL_TREE *Item_bool_func::get_ne_mm_tree(RANGE_OPT_PARAM *param,
}
+SEL_TREE *Item_func_ne::get_func_mm_tree(RANGE_OPT_PARAM *param,
+ Field *field, Item *value)
+{
+ DBUG_ENTER("Item_func_ne::get_func_mm_tree");
+ /*
+ If this condition is a "col1<>...", where there is a UNIQUE KEY(col1),
+ do not construct a SEL_TREE from it. A condition that excludes just one
+ row in the table is not selective (unless there are only a few rows)
+ */
+ if (is_field_an_unique_index(param, field))
+ DBUG_RETURN(NULL);
+ DBUG_RETURN(get_ne_mm_tree(param, field, value, value));
+}
+
+
SEL_TREE *Item_func_between::get_func_mm_tree(RANGE_OPT_PARAM *param,
Field *field, Item *value)
{
@@ -7788,28 +7803,16 @@ SEL_TREE *Item_func_in::get_func_mm_tree(RANGE_OPT_PARAM *param,
DBUG_RETURN(0);
/*
- If this is "unique_key NOT IN (...)", do not consider it sargable (for
- any index, not just the unique one). The logic is as follows:
+ If this is a "col1 NOT IN (...)" and there is a UNIQUE KEY(col1), do
+ not construct a SEL_TREE from it. The rationale is as follows:
- if there are only a few constants, this condition is not selective
(unless the table is also very small in which case we won't gain
anything)
- - If there are a lot of constants, the overhead of building and
+ - if there are a lot of constants, the overhead of building and
processing enormous range list is not worth it.
*/
- if (param->using_real_indexes)
- {
- key_map::Iterator it(field->key_start);
- uint key_no;
- while ((key_no= it.next_bit()) != key_map::Iterator::BITMAP_END)
- {
- KEY *key_info= &param->table->key_info[key_no];
- if (key_info->user_defined_key_parts == 1 &&
- (key_info->flags & HA_NOSAME))
- {
- DBUG_RETURN(0);
- }
- }
- }
+ if (is_field_an_unique_index(param, field))
+ DBUG_RETURN(0);
/* Get a SEL_TREE for "(-inf|NULL) < X < c_0" interval. */
uint i=0;
@@ -8527,6 +8530,38 @@ SEL_TREE *Item_equal::get_mm_tree(RANGE_OPT_PARAM *param, Item **cond_ptr)
}
+/*
+ @brief
+ Check if there is a one-segment unique key that matches the field exactly
+
+ @detail
+ In the future we could also add "almost unique" indexes where any value is
+ present in only a few rows (but not necessarily in exactly one row)
+*/
+static bool is_field_an_unique_index(RANGE_OPT_PARAM *param, Field *field)
+{
+ DBUG_ENTER("is_field_an_unique_index");
+
+ // The check for using_real_indexes is there because of the heuristics
+ // this function is used for.
+ if (param->using_real_indexes)
+ {
+ key_map::Iterator it(field->key_start);
+ uint key_no;
+ while ((key_no= it++) != key_map::Iterator::BITMAP_END)
+ {
+ KEY *key_info= &field->table->key_info[key_no];
+ if (key_info->user_defined_key_parts == 1 &&
+ (key_info->flags & HA_NOSAME))
+ {
+ DBUG_RETURN(true);
+ }
+ }
+ }
+ DBUG_RETURN(false);
+}
+
+
SEL_TREE *
Item_bool_func::get_mm_parts(RANGE_OPT_PARAM *param, Field *field,
Item_func::Functype type, Item *value)
@@ -15658,8 +15693,8 @@ static void
print_sel_arg_key(Field *field, const uchar *key, String *out)
{
TABLE *table= field->table;
- my_bitmap_map *old_sets[2];
- dbug_tmp_use_all_columns(table, old_sets, table->read_set, table->write_set);
+ MY_BITMAP *old_sets[2];
+ dbug_tmp_use_all_columns(table, old_sets, &table->read_set, &table->write_set);
if (field->real_maybe_null())
{
@@ -15679,7 +15714,7 @@ print_sel_arg_key(Field *field, const uchar *key, String *out)
field->val_str(out);
end:
- dbug_tmp_restore_column_maps(table->read_set, table->write_set, old_sets);
+ dbug_tmp_restore_column_maps(&table->read_set, &table->write_set, old_sets);
}
@@ -15774,9 +15809,9 @@ print_key(KEY_PART *key_part, const uchar *key, uint used_length)
const uchar *key_end= key+used_length;
uint store_length;
TABLE *table= key_part->field->table;
- my_bitmap_map *old_sets[2];
+ MY_BITMAP *old_sets[2];
- dbug_tmp_use_all_columns(table, old_sets, table->read_set, table->write_set);
+ dbug_tmp_use_all_columns(table, old_sets, &table->read_set, &table->write_set);
for (; key < key_end; key+=store_length, key_part++)
{
@@ -15803,7 +15838,7 @@ print_key(KEY_PART *key_part, const uchar *key, uint used_length)
if (key+store_length < key_end)
fputc('/',DBUG_FILE);
}
- dbug_tmp_restore_column_maps(table->read_set, table->write_set, old_sets);
+ dbug_tmp_restore_column_maps(&table->read_set, &table->write_set, old_sets);
}
@@ -15811,16 +15846,16 @@ static void print_quick(QUICK_SELECT_I *quick, const key_map *needed_reg)
{
char buf[MAX_KEY/8+1];
TABLE *table;
- my_bitmap_map *old_sets[2];
+ MY_BITMAP *old_sets[2];
DBUG_ENTER("print_quick");
if (!quick)
DBUG_VOID_RETURN;
DBUG_LOCK_FILE;
table= quick->head;
- dbug_tmp_use_all_columns(table, old_sets, table->read_set, table->write_set);
+ dbug_tmp_use_all_columns(table, old_sets, &table->read_set, &table->write_set);
quick->dbug_dump(0, TRUE);
- dbug_tmp_restore_column_maps(table->read_set, table->write_set, old_sets);
+ dbug_tmp_restore_column_maps(&table->read_set, &table->write_set, old_sets);
fprintf(DBUG_FILE,"other_keys: 0x%s:\n", needed_reg->print(buf));
@@ -16043,8 +16078,8 @@ void print_range_for_non_indexed_field(String *out, Field *field,
KEY_MULTI_RANGE *range)
{
TABLE *table= field->table;
- my_bitmap_map *old_sets[2];
- dbug_tmp_use_all_columns(table, old_sets, table->read_set, table->write_set);
+ MY_BITMAP *old_sets[2];
+ dbug_tmp_use_all_columns(table, old_sets, &table->read_set, &table->write_set);
if (range->start_key.length)
{
@@ -16059,7 +16094,7 @@ void print_range_for_non_indexed_field(String *out, Field *field,
print_max_range_operator(out, range->end_key.flag);
field->print_key_part_value(out, range->end_key.key, field->key_length());
}
- dbug_tmp_restore_column_maps(table->read_set, table->write_set, old_sets);
+ dbug_tmp_restore_column_maps(&table->read_set, &table->write_set, old_sets);
}
@@ -16126,8 +16161,8 @@ static void print_key_value(String *out, const KEY_PART_INFO *key_part,
StringBuffer<128> tmp(system_charset_info);
TABLE *table= field->table;
uint store_length;
- my_bitmap_map *old_sets[2];
- dbug_tmp_use_all_columns(table, old_sets, table->read_set, table->write_set);
+ MY_BITMAP *old_sets[2];
+ dbug_tmp_use_all_columns(table, old_sets, &table->read_set, &table->write_set);
const uchar *key_end= key+used_length;
for (; key < key_end; key+=store_length, key_part++)
@@ -16140,7 +16175,7 @@ static void print_key_value(String *out, const KEY_PART_INFO *key_part,
if (key + store_length < key_end)
out->append(STRING_WITH_LEN(","));
}
- dbug_tmp_restore_column_maps(table->read_set, table->write_set, old_sets);
+ dbug_tmp_restore_column_maps(&table->read_set, &table->write_set, old_sets);
out->append(STRING_WITH_LEN(")"));
}
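
The new is_field_an_unique_index() helper factors out the heuristic used by both Item_func_ne::get_func_mm_tree() and Item_func_in::get_func_mm_tree() above: when the field is the only column of a UNIQUE key, a "<>" or "NOT IN" condition excludes at most a handful of rows and is not worth building a range tree for. A minimal standalone sketch of that check, with simplified stand-in types (KeyDef, TableDef and FLAG_UNIQUE below are illustrative, not the server's KEY/TABLE/HA_NOSAME definitions):

#include <vector>

static const unsigned FLAG_UNIQUE= 1;          // plays the role of HA_NOSAME

struct KeyDef {
  unsigned user_defined_key_parts;             // number of columns in the key
  unsigned flags;                              // FLAG_UNIQUE if the key is UNIQUE
  std::vector<int> columns;                    // column numbers covered by the key
};

struct TableDef { std::vector<KeyDef> keys; };

// Returns true if field_no is the only column of some UNIQUE key,
// i.e. "field <> const" can exclude at most one row.
static bool is_field_a_unique_index(const TableDef &t, int field_no)
{
  for (const KeyDef &key : t.keys)
  {
    if (key.user_defined_key_parts == 1 &&
        (key.flags & FLAG_UNIQUE) &&
        key.columns.size() == 1 && key.columns[0] == field_no)
      return true;
  }
  return false;
}

int main()
{
  TableDef t;
  t.keys.push_back({1, FLAG_UNIQUE, {0}});     // UNIQUE KEY(col0)
  t.keys.push_back({2, 0, {1, 2}});            // KEY(col1, col2)
  // "col0 <> 5" would be rejected as a range candidate, "col1 <> 5" would not.
  return (is_field_a_unique_index(t, 0) && !is_field_a_unique_index(t, 1)) ? 0 : 1;
}
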
diff --git a/sql/opt_split.cc b/sql/opt_split.cc
index 6807a623b73..395422de3c3 100644
--- a/sql/opt_split.cc
+++ b/sql/opt_split.cc
@@ -204,7 +204,7 @@ struct SplM_field_info
struct SplM_plan_info
{
/* The cached splitting execution plan P */
- struct st_position *best_positions;
+ POSITION *best_positions;
/* The cost of the above plan */
double cost;
/* Selectivity of splitting used in P */
@@ -236,6 +236,8 @@ public:
SplM_field_info *spl_fields;
/* The number of elements in the above list */
uint spl_field_cnt;
+ /* The list of equalities injected into WHERE for split optimization */
+ List<Item> inj_cond_list;
/* Contains the structures to generate all KEYUSEs for pushable equalities */
List<KEY_FIELD> added_key_fields;
/* The cache of evaluated execution plans for 'join' with pushed equalities */
@@ -1047,22 +1049,22 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count,
bool JOIN::inject_best_splitting_cond(table_map remaining_tables)
{
Item *inj_cond= 0;
- List<Item> inj_cond_list;
+ List<Item> *inj_cond_list= &spl_opt_info->inj_cond_list;
List_iterator<KEY_FIELD> li(spl_opt_info->added_key_fields);
KEY_FIELD *added_key_field;
while ((added_key_field= li++))
{
if (remaining_tables & added_key_field->val->used_tables())
continue;
- if (inj_cond_list.push_back(added_key_field->cond, thd->mem_root))
+ if (inj_cond_list->push_back(added_key_field->cond, thd->mem_root))
return true;
}
- DBUG_ASSERT(inj_cond_list.elements);
- switch (inj_cond_list.elements) {
+ DBUG_ASSERT(inj_cond_list->elements);
+ switch (inj_cond_list->elements) {
case 1:
- inj_cond= inj_cond_list.head(); break;
+ inj_cond= inj_cond_list->head(); break;
default:
- inj_cond= new (thd->mem_root) Item_cond_and(thd, inj_cond_list);
+ inj_cond= new (thd->mem_root) Item_cond_and(thd, *inj_cond_list);
if (!inj_cond)
return true;
}
@@ -1082,6 +1084,40 @@ bool JOIN::inject_best_splitting_cond(table_map remaining_tables)
/**
@brief
+ Test if equality is injected for split optimization
+
+ @param
+ eq_item equality to to test
+
+ @retval
+ true eq_item is equality injected for split optimization
+ false otherwise
+*/
+
+bool is_eq_cond_injected_for_split_opt(Item_func_eq *eq_item)
+{
+ Item *left_item= eq_item->arguments()[0]->real_item();
+ if (left_item->type() != Item::FIELD_ITEM)
+ return false;
+ Field *field= ((Item_field *) left_item)->field;
+ if (!field->table->reginfo.join_tab)
+ return false;
+ JOIN *join= field->table->reginfo.join_tab->join;
+ if (!join->spl_opt_info)
+ return false;
+ List_iterator_fast<Item> li(join->spl_opt_info->inj_cond_list);
+ Item *item;
+ while ((item= li++))
+ {
+ if (item == eq_item)
+ return true;
+ }
+ return false;
+}
+
+
+/**
+ @brief
Fix the splitting chosen for a splittable table in the final query plan
@param
@@ -1149,7 +1185,7 @@ bool JOIN_TAB::fix_splitting(SplM_plan_info *spl_plan,
bool JOIN::fix_all_splittings_in_plan()
{
table_map prev_tables= 0;
- table_map all_tables= (1 << table_count) - 1;
+ table_map all_tables= (table_map(1) << table_count) - 1;
for (uint tablenr= 0; tablenr < table_count; tablenr++)
{
POSITION *cur_pos= &best_positions[tablenr];
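
The fix_all_splittings_in_plan() change above is an integer-width fix: in "(1 << table_count) - 1" the literal 1 is a 32-bit int, so the shift is undefined once table_count reaches the 31-32 range, while the mask is meant to cover up to 64 tables. A small illustration of the difference (table_map is assumed here to be the 64-bit ulonglong bitmap it is in the server headers):

#include <cstdint>
#include <cstdio>

typedef uint64_t table_map;   // assumption: matches the server's 64-bit table bitmap

int main()
{
  unsigned table_count= 40;   // plausible with many tables in a join plan

  // Wrong: the literal 1 is a 32-bit int, so the shift overflows for counts >= 32:
  //   table_map bad= (1 << table_count) - 1;

  // Right: widen first, then shift in 64 bits.
  table_map all_tables= (table_map(1) << table_count) - 1;

  std::printf("%016llx\n", (unsigned long long) all_tables);  // 000000ffffffffff
  return 0;
}
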
diff --git a/sql/opt_subselect.cc b/sql/opt_subselect.cc
index f7349e7a1bf..7bd778e339f 100644
--- a/sql/opt_subselect.cc
+++ b/sql/opt_subselect.cc
@@ -2990,7 +2990,7 @@ void advance_sj_state(JOIN *join, table_map remaining_tables, uint idx,
}
-void Sj_materialization_picker::set_from_prev(struct st_position *prev)
+void Sj_materialization_picker::set_from_prev(POSITION *prev)
{
if (prev->sjmat_picker.is_used)
set_empty();
@@ -3176,7 +3176,7 @@ bool Sj_materialization_picker::check_qep(JOIN *join,
}
-void LooseScan_picker::set_from_prev(struct st_position *prev)
+void LooseScan_picker::set_from_prev(POSITION *prev)
{
if (prev->loosescan_picker.is_used)
set_empty();
@@ -3197,7 +3197,7 @@ bool LooseScan_picker::check_qep(JOIN *join,
double *read_time,
table_map *handled_fanout,
sj_strategy_enum *strategy,
- struct st_position *loose_scan_pos)
+ POSITION *loose_scan_pos)
{
POSITION *first= join->positions + first_loosescan_table;
/*
@@ -3275,7 +3275,7 @@ bool LooseScan_picker::check_qep(JOIN *join,
return FALSE;
}
-void Firstmatch_picker::set_from_prev(struct st_position *prev)
+void Firstmatch_picker::set_from_prev(POSITION *prev)
{
if (prev->firstmatch_picker.is_used)
invalidate_firstmatch_prefix();
@@ -5789,8 +5789,8 @@ Item *and_new_conditions_to_optimized_cond(THD *thd, Item *cond,
((Item_func *) item)->functype() == Item_func::EQ_FUNC &&
check_simple_equality(thd,
Item::Context(Item::ANY_SUBST,
- ((Item_func_equal *)item)->compare_type_handler(),
- ((Item_func_equal *)item)->compare_collation()),
+ ((Item_func_eq *)item)->compare_type_handler(),
+ ((Item_func_eq *)item)->compare_collation()),
((Item_func *)item)->arguments()[0],
((Item_func *)item)->arguments()[1],
&new_cond_equal))
diff --git a/sql/opt_sum.cc b/sql/opt_sum.cc
index af2d9ddc2e7..27360d4a10c 100644
--- a/sql/opt_sum.cc
+++ b/sql/opt_sum.cc
@@ -842,7 +842,10 @@ static bool matching_cond(bool max_fl, TABLE_REF *ref, KEY *keyinfo,
if (is_field_part)
{
if (between || eq_type)
+ {
*range_fl&= ~(NO_MAX_RANGE | NO_MIN_RANGE);
+ *range_fl&= ~(max_fl ? NEAR_MAX : NEAR_MIN);
+ }
else
{
*range_fl&= ~(max_fl ? NO_MAX_RANGE : NO_MIN_RANGE);
diff --git a/sql/partition_element.h b/sql/partition_element.h
index ff0d0d59fc4..e0a519065cc 100644
--- a/sql/partition_element.h
+++ b/sql/partition_element.h
@@ -144,6 +144,7 @@ public:
part_min_rows(part_elem->part_min_rows),
range_value(0), partition_name(NULL),
tablespace_name(part_elem->tablespace_name),
+ log_entry(NULL),
part_comment(part_elem->part_comment),
data_file_name(part_elem->data_file_name),
index_file_name(part_elem->index_file_name),
@@ -152,6 +153,8 @@ public:
part_state(part_elem->part_state),
nodegroup_id(part_elem->nodegroup_id),
has_null_value(FALSE),
+ signed_flag(part_elem->signed_flag),
+ max_value(part_elem->max_value),
id(part_elem->id),
empty(part_elem->empty),
type(CONVENTIONAL)
diff --git a/sql/partition_info.cc b/sql/partition_info.cc
index 9f08964e62c..a8459438be7 100644
--- a/sql/partition_info.cc
+++ b/sql/partition_info.cc
@@ -1449,13 +1449,13 @@ void partition_info::print_no_partition_found(TABLE *table_arg, myf errflag)
buf_ptr= (char*)"from column_list";
else
{
- my_bitmap_map *old_map= dbug_tmp_use_all_columns(table_arg, table_arg->read_set);
+ MY_BITMAP *old_map= dbug_tmp_use_all_columns(table_arg, &table_arg->read_set);
if (part_expr->null_value)
buf_ptr= (char*)"NULL";
else
longlong10_to_str(err_value, buf,
part_expr->unsigned_flag ? 10 : -10);
- dbug_tmp_restore_column_map(table_arg->read_set, old_map);
+ dbug_tmp_restore_column_map(&table_arg->read_set, old_map);
}
my_error(ER_NO_PARTITION_FOR_GIVEN_VALUE, errflag, buf_ptr);
}
diff --git a/sql/protocol.cc b/sql/protocol.cc
index aa9651e974c..eb7f19d2bd0 100644
--- a/sql/protocol.cc
+++ b/sql/protocol.cc
@@ -1269,15 +1269,15 @@ bool Protocol_text::store(Field *field)
CHARSET_INFO *tocs= this->thd->variables.character_set_results;
#ifdef DBUG_ASSERT_EXISTS
TABLE *table= field->table;
- my_bitmap_map *old_map= 0;
+ MY_BITMAP *old_map= 0;
if (table->file)
- old_map= dbug_tmp_use_all_columns(table, table->read_set);
+ old_map= dbug_tmp_use_all_columns(table, &table->read_set);
#endif
field->val_str(&str);
#ifdef DBUG_ASSERT_EXISTS
if (old_map)
- dbug_tmp_restore_column_map(table->read_set, old_map);
+ dbug_tmp_restore_column_map(&table->read_set, old_map);
#endif
return store_string_aux(str.ptr(), str.length(), str.charset(), tocs);
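
The repeated my_bitmap_map* -> MY_BITMAP* edits in opt_range.cc, partition_info.cc and protocol.cc all follow from a changed dbug_tmp_use_all_columns()/dbug_tmp_restore_column_map() contract: the helpers now swap the table's read_set/write_set pointer itself and hand back the previous MY_BITMAP*, instead of saving the raw bitmap buffer. The sketch below is a simplified guess at that shape, not the server's actual definitions (which are DBUG-only wrappers in the table headers):

struct MY_BITMAP { /* bit storage elided */ };

struct TABLE_SHARE { MY_BITMAP all_set; };     // "every column readable/writable"
struct TABLE { TABLE_SHARE *s; MY_BITMAP *read_set; MY_BITMAP *write_set; };

static inline MY_BITMAP *tmp_use_all_columns(TABLE *table, MY_BITMAP **bitmap)
{
  MY_BITMAP *old= *bitmap;      // remember the caller's current column map
  *bitmap= &table->s->all_set;  // temporarily allow access to all columns
  return old;
}

static inline void tmp_restore_column_map(MY_BITMAP **bitmap, MY_BITMAP *old)
{
  *bitmap= old;                 // put the original map back
}

// Usage mirroring the hunks above:
static void debug_dump(TABLE *table)
{
  MY_BITMAP *old_map= tmp_use_all_columns(table, &table->read_set);
  /* ... read any field for printing ... */
  tmp_restore_column_map(&table->read_set, old_map);
}

int main()
{
  TABLE_SHARE share;
  MY_BITMAP some_map;
  TABLE t{ &share, &some_map, &some_map };
  debug_dump(&t);
  return t.read_set == &some_map ? 0 : 1;  // pointer restored on exit
}
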
diff --git a/sql/rpl_parallel.cc b/sql/rpl_parallel.cc
index 154636480ca..35901cb5263 100644
--- a/sql/rpl_parallel.cc
+++ b/sql/rpl_parallel.cc
@@ -4,6 +4,7 @@
#include "rpl_mi.h"
#include "sql_parse.h"
#include "debug_sync.h"
+#include "sql_repl.h"
#include "wsrep_mysqld.h"
#ifdef WITH_WSREP
#include "wsrep_trans_observer.h"
@@ -100,7 +101,7 @@ handle_queued_pos_update(THD *thd, rpl_parallel_thread::queued_event *qev)
return;
mysql_mutex_lock(&rli->data_lock);
- cmp= strcmp(rli->group_relay_log_name, qev->event_relay_log_name);
+ cmp= compare_log_name(rli->group_relay_log_name, qev->event_relay_log_name);
if (cmp < 0)
{
rli->group_relay_log_pos= qev->future_event_relay_log_pos;
@@ -109,7 +110,7 @@ handle_queued_pos_update(THD *thd, rpl_parallel_thread::queued_event *qev)
rli->group_relay_log_pos < qev->future_event_relay_log_pos)
rli->group_relay_log_pos= qev->future_event_relay_log_pos;
- cmp= strcmp(rli->group_master_log_name, qev->future_event_master_log_name);
+ cmp= compare_log_name(rli->group_master_log_name, qev->future_event_master_log_name);
if (cmp < 0)
{
strcpy(rli->group_master_log_name, qev->future_event_master_log_name);
diff --git a/sql/rpl_rli.cc b/sql/rpl_rli.cc
index bcdff1e33a8..c8f77acf523 100644
--- a/sql/rpl_rli.cc
+++ b/sql/rpl_rli.cc
@@ -991,7 +991,7 @@ void Relay_log_info::inc_group_relay_log_pos(ulonglong log_pos,
if (rgi->is_parallel_exec)
{
/* In case of parallel replication, do not update the position backwards. */
- int cmp= strcmp(group_relay_log_name, rgi->event_relay_log_name);
+ int cmp= compare_log_name(group_relay_log_name, rgi->event_relay_log_name);
if (cmp < 0)
{
group_relay_log_pos= rgi->future_event_relay_log_pos;
@@ -1003,7 +1003,7 @@ void Relay_log_info::inc_group_relay_log_pos(ulonglong log_pos,
In the parallel case we need to update the master_log_name here, rather
than in Rotate_log_event::do_update_pos().
*/
- cmp= strcmp(group_master_log_name, rgi->future_event_master_log_name);
+ cmp= compare_log_name(group_master_log_name, rgi->future_event_master_log_name);
if (cmp <= 0)
{
if (cmp < 0)
diff --git a/sql/semisync_master_ack_receiver.cc b/sql/semisync_master_ack_receiver.cc
index e189fc5f631..b24d6452480 100644
--- a/sql/semisync_master_ack_receiver.cc
+++ b/sql/semisync_master_ack_receiver.cc
@@ -267,6 +267,11 @@ void Ack_receiver::run()
net_clear(&net, 0);
net.vio= &slave->vio;
+ /*
+ Set the compress flag. This is needed to support slaves that have
+ the slave_compressed_protocol option enabled
+ */
+ net.compress= slave->thd->net.compress;
len= my_net_read(&net);
if (likely(len != packet_error))
diff --git a/sql/service_wsrep.cc b/sql/service_wsrep.cc
index 4fc927cfd86..14f136ca480 100644
--- a/sql/service_wsrep.cc
+++ b/sql/service_wsrep.cc
@@ -37,6 +37,16 @@ extern "C" void wsrep_thd_UNLOCK(const THD *thd)
mysql_mutex_unlock(&thd->LOCK_thd_data);
}
+extern "C" void wsrep_thd_kill_LOCK(const THD *thd)
+{
+ mysql_mutex_lock(&thd->LOCK_thd_kill);
+}
+
+extern "C" void wsrep_thd_kill_UNLOCK(const THD *thd)
+{
+ mysql_mutex_unlock(&thd->LOCK_thd_kill);
+}
+
extern "C" const char* wsrep_thd_client_state_str(const THD *thd)
{
return wsrep::to_c_string(thd->wsrep_cs().state());
@@ -110,15 +120,23 @@ extern "C" my_bool wsrep_get_debug()
return wsrep_debug;
}
+/*
+ Test if this connection is a true local (user) connection and not
+ a replication or wsrep applier thread.
+
+ Note that this is only usable for Galera (as there are other kinds
+ of system threads), and only if WSREP_NNULL() is tested by the caller.
+ */
extern "C" my_bool wsrep_thd_is_local(const THD *thd)
{
/*
- async replication IO and background threads have nothing to replicate in the cluster,
- marking them as non-local here to prevent write set population and replication
+ async replication IO and background threads have nothing to
+ replicate in the cluster, marking them as non-local here to
+ prevent write set population and replication
- async replication SQL thread, applies client transactions from mariadb master
- and will be replicated into cluster
- */
+ async replication SQL thread, applies client transactions from
+ mariadb master and will be replicated into cluster
+ */
return (
thd->system_thread != SYSTEM_THREAD_SLAVE_BACKGROUND &&
thd->system_thread != SYSTEM_THREAD_SLAVE_IO &&
@@ -200,16 +218,8 @@ extern "C" void wsrep_handle_SR_rollback(THD *bf_thd,
extern "C" my_bool wsrep_thd_bf_abort(THD *bf_thd, THD *victim_thd,
my_bool signal)
{
- DBUG_EXECUTE_IF("sync.before_wsrep_thd_abort",
- {
- const char act[]=
- "now "
- "SIGNAL sync.before_wsrep_thd_abort_reached "
- "WAIT_FOR signal.before_wsrep_thd_abort";
- DBUG_ASSERT(!debug_sync_set_action(bf_thd,
- STRING_WITH_LEN(act)));
- };);
-
+ mysql_mutex_assert_owner(&victim_thd->LOCK_thd_kill);
+ mysql_mutex_assert_not_owner(&victim_thd->LOCK_thd_data);
my_bool ret= wsrep_bf_abort(bf_thd, victim_thd);
/*
Send awake signal if victim was BF aborted or does not
@@ -218,8 +228,6 @@ extern "C" my_bool wsrep_thd_bf_abort(THD *bf_thd, THD *victim_thd,
*/
if ((ret || !wsrep_on(victim_thd)) && signal)
{
- mysql_mutex_assert_not_owner(&victim_thd->LOCK_thd_data);
- mysql_mutex_assert_not_owner(&victim_thd->LOCK_thd_kill);
mysql_mutex_lock(&victim_thd->LOCK_thd_data);
if (victim_thd->wsrep_aborter && victim_thd->wsrep_aborter != bf_thd->thread_id)
@@ -230,10 +238,8 @@ extern "C" my_bool wsrep_thd_bf_abort(THD *bf_thd, THD *victim_thd,
return false;
}
- mysql_mutex_lock(&victim_thd->LOCK_thd_kill);
victim_thd->wsrep_aborter= bf_thd->thread_id;
victim_thd->awake_no_mutex(KILL_QUERY);
- mysql_mutex_unlock(&victim_thd->LOCK_thd_kill);
mysql_mutex_unlock(&victim_thd->LOCK_thd_data);
} else {
WSREP_DEBUG("wsrep_thd_bf_abort skipped awake");
diff --git a/sql/share/errmsg-utf8.txt b/sql/share/errmsg-utf8.txt
index ef0dfd5eb63..1c6a1326538 100644
--- a/sql/share/errmsg-utf8.txt
+++ b/sql/share/errmsg-utf8.txt
@@ -191,7 +191,7 @@ ER_DB_DROP_DELETE
hun "Adatbazis megszuntetesi hiba ('%-.192s' nem torolheto, hibakod: %M)"
ita "Errore durante la cancellazione del database (impossibile cancellare '%-.192s', errno: %M)"
jpn "データベース削除エラー ('%-.192s' を削除ã§ãã¾ã›ã‚“。エラー番å·: %M)"
- kor "ë°ì´íƒ€ë² ì´ìŠ¤ 제거 ì—러('%-.192s'를 삭제할 수 ì—†ì니다, ì—러번호: %M)"
+ kor "ë°ì´íƒ€ë² ì´ìŠ¤ 제거 ì—러('%-.192s'를 삭제할 수 없습니다, ì—러번호: %M)"
nor "Feil ved fjerning (drop) av databasen (kan ikke slette '%-.192s', feil %M)"
norwegian-ny "Feil ved fjerning (drop) av databasen (kan ikkje slette '%-.192s', feil %M)"
pol "Bł?d podczas usuwania bazy danych (nie można usun?ć '%-.192s', bł?d %M)"
@@ -216,7 +216,7 @@ ER_DB_DROP_RMDIR
hun "Adatbazis megszuntetesi hiba ('%-.192s' nem szuntetheto meg, hibakod: %M)"
ita "Errore durante la cancellazione del database (impossibile rmdir '%-.192s', errno: %M)"
jpn "データベース削除エラー (ディレクトリ '%-.192s' を削除ã§ãã¾ã›ã‚“。エラー番å·: %M)"
- kor "ë°ì´íƒ€ë² ì´ìŠ¤ 제거 ì—러(rmdir '%-.192s'를 í•  수 ì—†ì니다, ì—러번호: %M)"
+ kor "ë°ì´íƒ€ë² ì´ìŠ¤ 제거 ì—러(rmdir '%-.192s'를 í•  수 없습니다, ì—러번호: %M)"
nor "Feil ved sletting av database (kan ikke slette katalogen '%-.192s', feil %M)"
norwegian-ny "Feil ved sletting av database (kan ikkje slette katalogen '%-.192s', feil %M)"
pol "Bł?d podczas usuwania bazy danych (nie można wykonać rmdir '%-.192s', bł?d %M)"
@@ -516,7 +516,7 @@ ER_DUP_KEY 23000
hun "Irasi hiba, duplikalt kulcs a '%-.192s' tablaban"
ita "Scrittura impossibile: chiave duplicata nella tabella '%-.192s'"
jpn "書ãè¾¼ã‚ã¾ã›ã‚“。表 '%-.192s' ã«é‡è¤‡ã™ã‚‹ã‚­ãƒ¼ãŒã‚ã‚Šã¾ã™ã€‚"
- kor "기ë¡í•  수 ì—†ì니다., í…Œì´ë¸” '%-.192s'ì—ì„œ 중복 키"
+ kor "기ë¡í•  수 없습니다., í…Œì´ë¸” '%-.192s'ì—ì„œ 중복 키"
nor "Kan ikke skrive, flere like nøkler i tabellen '%-.192s'"
norwegian-ny "Kan ikkje skrive, flere like nyklar i tabellen '%-.192s'"
pol "Nie można zapisać, powtórzone klucze w tabeli '%-.192s'"
@@ -641,7 +641,7 @@ ER_FILE_USED
hun "'%-.192s' a valtoztatas ellen zarolva"
ita "'%-.192s' e` soggetto a lock contro i cambiamenti"
jpn "'%-.192s' ã¯ãƒ­ãƒƒã‚¯ã•ã‚Œã¦ã„ã¾ã™ã€‚"
- kor "'%-.192s'ê°€ 변경할 수 ì—†ë„ë¡ ìž ê²¨ìžˆì니다."
+ kor "'%-.192s'ê°€ 변경할 수 ì—†ë„ë¡ ìž ê²¨ìžˆìŠµë‹ˆë‹¤."
nor "'%-.192s' er låst mot oppdateringer"
norwegian-ny "'%-.192s' er låst mot oppdateringar"
pol "'%-.192s' jest zablokowany na wypadek zmian"
@@ -691,7 +691,7 @@ ER_FORM_NOT_FOUND
hun "A(z) '%-.192s' nezet nem letezik a(z) '%-.192s'-hoz"
ita "La view '%-.192s' non esiste per '%-.192s'"
jpn "ビュー '%-.192s' 㯠'%-.192s' ã«å­˜åœ¨ã—ã¾ã›ã‚“。"
- kor "ë·° '%-.192s'ê°€ '%-.192s'ì—서는 존재하지 ì•Šì니다."
+ kor "ë·° '%-.192s'ê°€ '%-.192s'ì—서는 존재하지 않습니다."
nor "View '%-.192s' eksisterer ikke for '%-.192s'"
norwegian-ny "View '%-.192s' eksisterar ikkje for '%-.192s'"
pol "Widok '%-.192s' nie istnieje dla '%-.192s'"
@@ -739,7 +739,7 @@ ER_KEY_NOT_FOUND
hun "Nem talalhato a rekord '%-.192s'-ben"
ita "Impossibile trovare il record in '%-.192s'"
jpn "'%-.192s' ã«ãƒ¬ã‚³ãƒ¼ãƒ‰ãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“。"
- kor "'%-.192s'ì—ì„œ 레코드를 ì°¾ì„ ìˆ˜ ì—†ì니다."
+ kor "'%-.192s'ì—ì„œ 레코드를 ì°¾ì„ ìˆ˜ 없습니다."
nor "Kan ikke finne posten i '%-.192s'"
norwegian-ny "Kan ikkje finne posten i '%-.192s'"
pol "Nie można znaleĽć rekordu w '%-.192s'"
@@ -985,7 +985,7 @@ ER_BAD_HOST_ERROR 08S01
hun "A gepnev nem allapithato meg a cimbol"
ita "Impossibile risalire al nome dell'host dall'indirizzo (risoluzione inversa)"
jpn "IPアドレスã‹ã‚‰ãƒ›ã‚¹ãƒˆåを解決ã§ãã¾ã›ã‚“。"
- kor "ë‹¹ì‹ ì˜ ì»´í“¨í„°ì˜ í˜¸ìŠ¤íŠ¸ì´ë¦„ì„ ì–»ì„ ìˆ˜ ì—†ì니다."
+ kor "ë‹¹ì‹ ì˜ ì»´í“¨í„°ì˜ í˜¸ìŠ¤íŠ¸ì´ë¦„ì„ ì–»ì„ ìˆ˜ 없습니다."
nor "Kan ikke få tak i vertsnavn for din adresse"
norwegian-ny "Kan ikkje få tak i vertsnavn for di adresse"
pol "Nie można otrzymać nazwy hosta dla twojego adresu"
@@ -1533,7 +1533,7 @@ ER_PARSE_ERROR 42000 s1009
hun "A %s a '%-.80T'-hez kozeli a %d sorban"
ita "%s vicino a '%-.80T' linea %d"
jpn "%s : '%-.80T' 付近 %d 行目"
- kor "'%s' ì—러 ê°™ì니다. ('%-.80T' 명령어 ë¼ì¸ %d)"
+ kor "'%s' ì—러 같습니다. ('%-.80T' 명령어 ë¼ì¸ %d)"
nor "%s nær '%-.80T' på linje %d"
norwegian-ny "%s attmed '%-.80T' på line %d"
pol "%s obok '%-.80T' w linii %d"
@@ -1658,7 +1658,7 @@ ER_TOO_MANY_KEYS 42000 S1009
hindi "बहà¥à¤¤ सारी KEYS निरà¥à¤¦à¤¿à¤·à¥à¤Ÿ हैं; अधिकतम %d KEYS की अनà¥à¤®à¤¤à¤¿ है"
ita "Troppe chiavi. Sono ammesse max %d chiavi"
jpn "索引ã®æ•°ãŒå¤šã™ãŽã¾ã™ã€‚最大 %d 個ã¾ã§ã§ã™ã€‚"
- kor "너무 ë§Žì€ í‚¤ê°€ ì •ì˜ë˜ì–´ 있ì니다.. 최대 %dì˜ í‚¤ê°€ 가능함"
+ kor "너무 ë§Žì€ í‚¤ê°€ ì •ì˜ë˜ì–´ 있습니다.. 최대 %dì˜ í‚¤ê°€ 가능함"
nor "For mange nøkler spesifisert. Maks %d nøkler tillatt"
norwegian-ny "For mange nykler spesifisert. Maks %d nyklar tillatt"
pol "Okre?lono zbyt wiele kluczy. Dostępnych jest maksymalnie %d kluczy"
@@ -1683,7 +1683,7 @@ ER_TOO_MANY_KEY_PARTS 42000 S1009
hun "Tul sok kulcsdarabot definialt. Maximum %d resz engedelyezett"
ita "Troppe parti di chiave specificate. Sono ammesse max %d parti"
jpn "索引ã®ã‚­ãƒ¼åˆ—指定ãŒå¤šã™ãŽã¾ã™ã€‚最大 %d 個ã¾ã§ã§ã™ã€‚"
- kor "너무 ë§Žì€ í‚¤ 부분(parts)ë“¤ì´ ì •ì˜ë˜ì–´ 있ì니다.. 최대 %d ë¶€ë¶„ì´ ê°€ëŠ¥í•¨"
+ kor "너무 ë§Žì€ í‚¤ 부분(parts)ë“¤ì´ ì •ì˜ë˜ì–´ 있습니다.. 최대 %d ë¶€ë¶„ì´ ê°€ëŠ¥í•¨"
nor "For mange nøkkeldeler spesifisert. Maks %d deler tillatt"
norwegian-ny "For mange nykkeldelar spesifisert. Maks %d delar tillatt"
pol "Okre?lono zbyt wiele czę?ci klucza. Dostępnych jest maksymalnie %d czę?ci"
@@ -2593,7 +2593,7 @@ ER_FIELD_SPECIFIED_TWICE 42000
hun "A(z) '%-.192s' mezot ketszer definialta"
ita "Campo '%-.192s' specificato 2 volte"
jpn "列 '%-.192s' ã¯2回指定ã•ã‚Œã¦ã„ã¾ã™ã€‚"
- kor "칼럼 '%-.192s'는 ë‘번 ì •ì˜ë˜ì–´ 있ì니다."
+ kor "칼럼 '%-.192s'는 ë‘번 ì •ì˜ë˜ì–´ 있습니다."
nor "Feltet '%-.192s' er spesifisert to ganger"
norwegian-ny "Feltet '%-.192s' er spesifisert to gangar"
pol "Field '%-.192s' specified twice"
@@ -2774,7 +2774,7 @@ ER_TOO_BIG_ROWSIZE 42000
hun "Tul nagy sormeret. A maximalis sormeret (nem szamolva a blob objektumokat) %ld. Nehany mezot meg kell valtoztatnia"
ita "Riga troppo grande. La massima grandezza di una riga, non contando i BLOB, e` %ld. Devi cambiare alcuni campi in BLOB"
jpn "行サイズãŒå¤§ãã™ãŽã¾ã™ã€‚ã“ã®è¡¨ã®æœ€å¤§è¡Œã‚µã‚¤ã‚ºã¯ BLOB ã‚’å«ã¾ãšã« %ld ã§ã™ã€‚æ ¼ç´æ™‚ã®ã‚ªãƒ¼ãƒãƒ¼ãƒ˜ãƒƒãƒ‰ã‚‚å«ã¾ã‚Œã¾ã™(マニュアルを確èªã—ã¦ãã ã•ã„)。列をTEXTã¾ãŸã¯BLOBã«å¤‰æ›´ã™ã‚‹å¿…è¦ãŒã‚ã‚Šã¾ã™ã€‚"
- kor "너무 í° row 사ì´ì¦ˆìž…니다. BLOB를 계산하지 ì•Šê³  최대 row 사ì´ì¦ˆëŠ” %ld입니다. ì–¼ë§ˆê°„ì˜ í•„ë“œë“¤ì„ BLOBë¡œ 바꾸셔야 ê² êµ°ìš”.."
+ kor "너무 í° row 사ì´ì¦ˆìž…니다. BLOB를 계산하지 ì•Šê³  최대 row 사ì´ì¦ˆëŠ” %ld입니다. ì¼ë¶€ì—´ì„ BLOB ë˜ëŠ” TEXTë¡œ 변경해야 합니다."
por "Tamanho de linha grande demais. O máximo tamanho de linha, não contando BLOBs, é %ld. Você tem que mudar alguns campos para BLOBs"
rum "Marimea liniei (row) prea mare. Marimea maxima a liniei, excluzind BLOB-urile este de %ld. Trebuie sa schimbati unele cimpuri in BLOB-uri"
rus "Слишком большой размер запиÑи. МакÑимальный размер Ñтроки, иÑÐºÐ»ÑŽÑ‡Ð°Ñ Ð¿Ð¾Ð»Ñ BLOB, - %ld. Возможно, вам Ñледует изменить тип некоторых полей на BLOB"
@@ -3077,7 +3077,7 @@ ER_PASSWORD_NO_MATCH 28000
hun "Nincs megegyezo sor a user tablaban"
ita "Impossibile trovare la riga corrispondente nella tabella user"
jpn "ユーザーテーブルã«è©²å½“ã™ã‚‹ãƒ¬ã‚³ãƒ¼ãƒ‰ãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“。"
- kor "ì‚¬ìš©ìž í…Œì´ë¸”ì—ì„œ ì¼ì¹˜í•˜ëŠ” ê²ƒì„ ì°¾ì„ ìˆ˜ ì—†ì니다."
+ kor "ì‚¬ìš©ìž í…Œì´ë¸”ì—ì„œ ì¼ì¹˜í•˜ëŠ” ê²ƒì„ ì°¾ì„ ìˆ˜ 없습니다."
por "Não pode encontrar nenhuma linha que combine na tabela usuário (user table)"
rum "Nu pot gasi nici o linie corespunzatoare in tabela utilizatorului"
rus "Ðевозможно отыÑкать подходÑщую запиÑÑŒ в таблице пользователей"
@@ -3219,7 +3219,7 @@ ER_MIX_OF_GROUP_FUNC_AND_FIELDS 42000
hun "A GROUP mezok (MIN(),MAX(),COUNT()...) kevert hasznalata nem lehetseges GROUP BY hivatkozas nelkul"
ita "Il mescolare funzioni di aggregazione (MIN(),MAX(),COUNT()...) e non e` illegale se non c'e` una clausula GROUP BY"
jpn "GROUP BYå¥ãŒç„¡ã„å ´åˆã€é›†è¨ˆé–¢æ•°(MIN(),MAX(),COUNT(),...)ã¨é€šå¸¸ã®åˆ—ã‚’åŒæ™‚ã«ä½¿ç”¨ã§ãã¾ã›ã‚“。"
- kor "Mixing of GROUP 칼럼s (MIN(),MAX(),COUNT(),...) with no GROUP 칼럼s is illegal if there is no GROUP BY clause"
+ kor "GROUP BY ì ˆ ì—†ì´ í˜¼í•©ëœ GROUP 함수 (MIN(),MAX(),COUNT(),...) 를 사용할 수 없습니다."
por "Mistura de colunas agrupadas (com MIN(), MAX(), COUNT(), ...) com colunas não agrupadas é ilegal, se não existir uma cláusula de agrupamento (cláusula GROUP BY)"
rum "Amestecarea de coloane GROUP (MIN(),MAX(),COUNT()...) fara coloane GROUP este ilegala daca nu exista o clauza GROUP BY"
rus "Одновременное иÑпользование Ñгруппированных (GROUP) Ñтолбцов (MIN(),MAX(),COUNT(),...) Ñ Ð½ÐµÑгруппированными Ñтолбцами ÑвлÑетÑÑ Ð½ÐµÐºÐ¾Ñ€Ñ€ÐµÐºÑ‚Ð½Ñ‹Ð¼, еÑли в выражении еÑÑ‚ÑŒ GROUP BY"
@@ -3668,6 +3668,7 @@ ER_TOO_LONG_STRING 42000
est "Tulemus on pikem kui lubatud 'max_allowed_packet' muutujaga"
fre "La chaîne résultat est plus grande que 'max_allowed_packet'"
ger "Ergebnis-String ist länger als 'max_allowed_packet' Bytes"
+ kor "ê²°ê³¼ 문ìžì—´ì´ ì„¤ì •ëœ max_allowed_packet 값보다 í½ë‹ˆë‹¤."
hindi "रिजलà¥à¤Ÿ सà¥à¤Ÿà¥à¤°à¤¿à¤‚ग 'max_allowed_packet' से लंबा है"
hun "Ez eredmeny sztring nagyobb, mint a lehetseges maximum: 'max_allowed_packet'"
ita "La stringa di risposta e` piu` lunga di 'max_allowed_packet'"
@@ -3689,6 +3690,7 @@ ER_TABLE_CANT_HANDLE_BLOB 42000
ger "Der verwendete Tabellentyp (%s) unterstützt keine BLOB- und TEXT-Felder"
hindi "सà¥à¤Ÿà¥‹à¤°à¥‡à¤œ इंजन %s BLOB/TEXT कॉलमà¥à¤¸ को सपोरà¥à¤Ÿ नहीं करता"
hun "A hasznalt tabla tipus (%s) nem tamogatja a BLOB/TEXT mezoket"
+ kor "스토리지 엔진 (%s)는 BLOB/TEXT ì»¬ëŸ¼ì„ ì§€ì›í•˜ì§€ 않습니다."
ita "Il tipo di tabella usata (%s) non supporta colonne di tipo BLOB/TEXT"
por "Tipo de tabela usado (%s) não permite colunas BLOB/TEXT"
rum "Tipul de tabela folosit (%s) nu suporta coloane de tip BLOB/TEXT"
@@ -3706,6 +3708,7 @@ ER_TABLE_CANT_HANDLE_AUTO_INCREMENT 42000
fre "Ce type de table (%s) ne supporte pas les colonnes AUTO_INCREMENT"
ger "Der verwendete Tabellentyp (%s) unterstützt keine AUTO_INCREMENT-Felder"
hindi "सà¥à¤Ÿà¥‹à¤°à¥‡à¤œ इंजन %s AUTO_INCREMENT कॉलमà¥à¤¸ को सपोरà¥à¤Ÿ नहीं करता"
+ kor "스토리지 엔진 (%s)는 AUTO_INCREMENT를 지ì›í•˜ì§€ 않습니다."
hun "A hasznalt tabla tipus (%s) nem tamogatja az AUTO_INCREMENT tipusu mezoket"
ita "Il tipo di tabella usata (%s) non supporta colonne di tipo AUTO_INCREMENT"
por "Tipo de tabela usado (%s) não permite colunas AUTO_INCREMENT"
diff --git a/sql/signal_handler.cc b/sql/signal_handler.cc
index 5439f13b3f4..f27676bee19 100644
--- a/sql/signal_handler.cc
+++ b/sql/signal_handler.cc
@@ -15,6 +15,7 @@
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1335 USA */
#include "mariadb.h"
+#include "my_dbug.h"
#include <signal.h>
//#include "sys_vars.h"
@@ -30,6 +31,11 @@
#define SIGNAL_FMT "signal %d"
#endif
+
+#if defined(__APPLE__) || defined(__FreeBSD__)
+#include <sys/sysctl.h>
+#endif
+
#ifndef PATH_MAX
#define PATH_MAX 4096
#endif
@@ -51,7 +57,7 @@ extern const char *optimizer_switch_names[];
static inline void output_core_info()
{
/* proc is optional on some BSDs so it can't hurt to look */
-#ifdef HAVE_READLINK
+#if defined(HAVE_READLINK) && !defined(__APPLE__) && !defined(__FreeBSD__)
char buff[PATH_MAX];
ssize_t len;
int fd;
@@ -77,6 +83,13 @@ static inline void output_core_info()
my_close(fd, MYF(0));
}
#endif
+#elif defined(__APPLE__) || defined(__FreeBSD__)
+ char buff[PATH_MAX];
+ size_t len = sizeof(buff);
+ if (sysctlbyname("kern.corefile", buff, &len, NULL, 0) == 0)
+ {
+ my_safe_printf_stderr("Core pattern: %.*s\n", (int) len, buff);
+ }
#else
char buff[80];
my_getwd(buff, sizeof(buff), 0);
@@ -118,8 +131,8 @@ extern "C" sig_handler handle_fatal_signal(int sig)
my_safe_printf_stderr("Fatal " SIGNAL_FMT " while backtracing\n", sig);
goto end;
}
-
segfaulted = 1;
+ DBUG_PRINT("error", ("handling fatal signal"));
curr_time= my_time(0);
localtime_r(&curr_time, &tm);
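
macOS and FreeBSD have no /proc file to readlink for the core pattern, which is what the new sysctlbyname() branch above handles. A minimal standalone version of the same query ("kern.corefile" is the MIB name used in the hunk; BSD/macOS only):

#include <sys/types.h>
#include <sys/sysctl.h>
#include <cstdio>

int main()
{
  char buff[4096];
  size_t len= sizeof(buff);
  // The NULL, 0 arguments mean "read only, do not set the value".
  if (sysctlbyname("kern.corefile", buff, &len, NULL, 0) == 0)
    std::printf("Core pattern: %.*s\n", (int) len, buff);
  else
    std::perror("sysctlbyname");
  return 0;
}
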
diff --git a/sql/slave.cc b/sql/slave.cc
index 9d4049c6452..31bd9372a14 100644
--- a/sql/slave.cc
+++ b/sql/slave.cc
@@ -64,6 +64,7 @@
#include "rpl_parallel.h"
#include "sql_show.h"
#include "semisync_slave.h"
+#include "sql_manager.h"
#define FLAGSTR(V,F) ((V)&(F)?#F" ":"")
@@ -362,10 +363,33 @@ end:
return err;
}
+static THD *new_bg_THD()
+{
+ THD *thd= new THD(next_thread_id());
+ thd->thread_stack= (char*) &thd;
+ thd->store_globals();
+ thd->system_thread = SYSTEM_THREAD_SLAVE_BACKGROUND;
+ thd->security_ctx->skip_grants();
+ thd->set_command(COM_DAEMON);
+ thd->variables.wsrep_on= 0;
+ return thd;
+}
-static void
-handle_gtid_pos_auto_create_request(THD *thd, void *hton)
+static void bg_gtid_delete_pending(void *)
{
+ THD *thd= new_bg_THD();
+
+ rpl_slave_state::list_element *list;
+ list= rpl_global_gtid_slave_state->gtid_grab_pending_delete_list();
+ rpl_global_gtid_slave_state->gtid_delete_pending(thd, &list);
+ if (list)
+ rpl_global_gtid_slave_state->put_back_list(list);
+ delete thd;
+}
+
+static void bg_gtid_pos_auto_create(void *hton)
+{
+ THD *thd= NULL;
int UNINIT_VAR(err);
plugin_ref engine= NULL, *auto_engines;
rpl_slave_state::gtid_pos_table *entry;
@@ -377,7 +401,6 @@ handle_gtid_pos_auto_create_request(THD *thd, void *hton)
it.
*/
mysql_mutex_lock(&LOCK_global_system_variables);
- engine= NULL;
for (auto_engines= opt_gtid_pos_auto_plugins;
auto_engines && *auto_engines;
++auto_engines)
@@ -422,6 +445,7 @@ handle_gtid_pos_auto_create_request(THD *thd, void *hton)
table_name.str= loc_table_name.c_ptr_safe();
table_name.length= loc_table_name.length();
+ thd= new_bg_THD();
err= gtid_pos_table_creation(thd, engine, &table_name);
if (err)
{
@@ -449,46 +473,16 @@ handle_gtid_pos_auto_create_request(THD *thd, void *hton)
mysql_mutex_unlock(&rpl_global_gtid_slave_state->LOCK_slave_state);
end:
+ delete thd;
if (engine)
plugin_unlock(NULL, engine);
}
-
-static bool slave_background_thread_running;
-static bool slave_background_thread_stop;
static bool slave_background_thread_gtid_loaded;
-static struct slave_background_kill_t {
- slave_background_kill_t *next;
- THD *to_kill;
-} *slave_background_kill_list;
-
-static struct slave_background_gtid_pos_create_t {
- slave_background_gtid_pos_create_t *next;
- void *hton;
-} *slave_background_gtid_pos_create_list;
-
-static volatile bool slave_background_gtid_pending_delete_flag;
-
-
-pthread_handler_t
-handle_slave_background(void *arg __attribute__((unused)))
+static void bg_rpl_load_gtid_slave_state(void *)
{
- THD *thd;
- PSI_stage_info old_stage;
- bool stop;
-
- my_thread_init();
- thd= new THD(next_thread_id());
- thd->thread_stack= (char*) &thd; /* Set approximate stack start */
- thd->system_thread = SYSTEM_THREAD_SLAVE_BACKGROUND;
- thd->store_globals();
- thd->security_ctx->skip_grants();
- thd->set_command(COM_DAEMON);
-#ifdef WITH_WSREP
- thd->variables.wsrep_on= 0;
-#endif
-
+ THD *thd= new_bg_THD();
thd_proc_info(thd, "Loading slave GTID position from table");
if (rpl_load_gtid_slave_state(thd))
sql_print_warning("Failed to load slave replication state from table "
@@ -497,207 +491,62 @@ handle_slave_background(void *arg __attribute__((unused)))
thd->get_stmt_da()->sql_errno(),
thd->get_stmt_da()->message());
- mysql_mutex_lock(&LOCK_slave_background);
+ // hijacking global_rpl_thread_pool cond here - it's only used once, at startup
+ mysql_mutex_lock(&global_rpl_thread_pool.LOCK_rpl_thread_pool);
slave_background_thread_gtid_loaded= true;
- mysql_cond_broadcast(&COND_slave_background);
-
- THD_STAGE_INFO(thd, stage_slave_background_process_request);
- do
- {
- slave_background_kill_t *kill_list;
- slave_background_gtid_pos_create_t *create_list;
- bool pending_deletes;
-
- thd->ENTER_COND(&COND_slave_background, &LOCK_slave_background,
- &stage_slave_background_wait_request,
- &old_stage);
- for (;;)
- {
- stop= thd->killed || slave_background_thread_stop;
- kill_list= slave_background_kill_list;
- create_list= slave_background_gtid_pos_create_list;
- pending_deletes= slave_background_gtid_pending_delete_flag;
- if (stop || kill_list || create_list || pending_deletes)
- break;
- mysql_cond_wait(&COND_slave_background, &LOCK_slave_background);
- }
-
- slave_background_kill_list= NULL;
- slave_background_gtid_pos_create_list= NULL;
- slave_background_gtid_pending_delete_flag= false;
- thd->EXIT_COND(&old_stage);
-
- while (kill_list)
- {
- slave_background_kill_t *p = kill_list;
- THD *to_kill= p->to_kill;
- kill_list= p->next;
-
- to_kill->awake(KILL_CONNECTION);
- mysql_mutex_lock(&to_kill->LOCK_wakeup_ready);
- to_kill->rgi_slave->killed_for_retry=
- rpl_group_info::RETRY_KILL_KILLED;
- mysql_cond_broadcast(&to_kill->COND_wakeup_ready);
- mysql_mutex_unlock(&to_kill->LOCK_wakeup_ready);
- my_free(p);
- }
-
- while (create_list)
- {
- slave_background_gtid_pos_create_t *next= create_list->next;
- void *hton= create_list->hton;
- handle_gtid_pos_auto_create_request(thd, hton);
- my_free(create_list);
- create_list= next;
- }
-
- if (pending_deletes)
- {
- rpl_slave_state::list_element *list;
-
- slave_background_gtid_pending_delete_flag= false;
- list= rpl_global_gtid_slave_state->gtid_grab_pending_delete_list();
- rpl_global_gtid_slave_state->gtid_delete_pending(thd, &list);
- if (list)
- rpl_global_gtid_slave_state->put_back_list(list);
- }
-
- mysql_mutex_lock(&LOCK_slave_background);
- } while (!stop);
-
- slave_background_thread_running= false;
- mysql_cond_broadcast(&COND_slave_background);
- mysql_mutex_unlock(&LOCK_slave_background);
-
+ mysql_cond_signal(&global_rpl_thread_pool.COND_rpl_thread_pool);
+ mysql_mutex_unlock(&global_rpl_thread_pool.LOCK_rpl_thread_pool);
delete thd;
-
- my_thread_end();
- return 0;
}
+static void bg_slave_kill(void *victim)
+{
+ THD *to_kill= (THD *)victim;
+ to_kill->awake(KILL_CONNECTION);
+ mysql_mutex_lock(&to_kill->LOCK_wakeup_ready);
+ to_kill->rgi_slave->killed_for_retry= rpl_group_info::RETRY_KILL_KILLED;
+ mysql_cond_broadcast(&to_kill->COND_wakeup_ready);
+ mysql_mutex_unlock(&to_kill->LOCK_wakeup_ready);
+}
-
-void
-slave_background_kill_request(THD *to_kill)
+void slave_background_kill_request(THD *to_kill)
{
if (to_kill->rgi_slave->killed_for_retry)
return; // Already deadlock killed.
- slave_background_kill_t *p=
- (slave_background_kill_t *)my_malloc(sizeof(*p), MYF(MY_WME));
- if (p)
- {
- p->to_kill= to_kill;
- to_kill->rgi_slave->killed_for_retry=
- rpl_group_info::RETRY_KILL_PENDING;
- mysql_mutex_lock(&LOCK_slave_background);
- p->next= slave_background_kill_list;
- slave_background_kill_list= p;
- mysql_cond_signal(&COND_slave_background);
- mysql_mutex_unlock(&LOCK_slave_background);
- }
+ to_kill->rgi_slave->killed_for_retry= rpl_group_info::RETRY_KILL_PENDING;
+ mysql_manager_submit(bg_slave_kill, to_kill);
}
-
/*
This function must only be called from a slave SQL thread (or worker thread),
to ensure that the table_entry will not go away before we can lock the
LOCK_slave_state.
*/
-void
-slave_background_gtid_pos_create_request(
+void slave_background_gtid_pos_create_request(
rpl_slave_state::gtid_pos_table *table_entry)
{
- slave_background_gtid_pos_create_t *p;
-
if (table_entry->state != rpl_slave_state::GTID_POS_AUTO_CREATE)
return;
- p= (slave_background_gtid_pos_create_t *)my_malloc(sizeof(*p), MYF(MY_WME));
- if (!p)
- return;
mysql_mutex_lock(&rpl_global_gtid_slave_state->LOCK_slave_state);
if (table_entry->state != rpl_slave_state::GTID_POS_AUTO_CREATE)
{
- my_free(p);
mysql_mutex_unlock(&rpl_global_gtid_slave_state->LOCK_slave_state);
return;
}
table_entry->state= rpl_slave_state::GTID_POS_CREATE_REQUESTED;
mysql_mutex_unlock(&rpl_global_gtid_slave_state->LOCK_slave_state);
- p->hton= table_entry->table_hton;
- mysql_mutex_lock(&LOCK_slave_background);
- p->next= slave_background_gtid_pos_create_list;
- slave_background_gtid_pos_create_list= p;
- mysql_cond_signal(&COND_slave_background);
- mysql_mutex_unlock(&LOCK_slave_background);
+ mysql_manager_submit(bg_gtid_pos_auto_create, table_entry->table_hton);
}
/*
- Request the slave background thread to delete no longer used rows from the
+ Request the manager thread to delete no longer used rows from the
mysql.gtid_slave_pos* tables.
-
- This is called from time-critical rpl_slave_state::update(), so we avoid
- taking any locks here. This means we may race with the background thread
- to occasionally lose a signal. This is not a problem; any pending rows to
- be deleted will just be deleted a bit later as part of the next batch.
*/
-void
-slave_background_gtid_pending_delete_request(void)
+void slave_background_gtid_pending_delete_request(void)
{
- slave_background_gtid_pending_delete_flag= true;
- mysql_cond_signal(&COND_slave_background);
-}
-
-
-/*
- Start the slave background thread.
-
- This thread is currently used for two purposes:
-
- 1. To load the GTID state from mysql.gtid_slave_pos at server start; reading
- from table requires valid THD, which is otherwise not available during
- server init.
-
- 2. To kill worker thread transactions during parallel replication, when a
- storage engine attempts to take an errorneous conflicting lock that would
- cause a deadlock. Killing is done asynchroneously, as the kill may not
- be safe within the context of a callback from inside storage engine
- locking code.
-*/
-static int
-start_slave_background_thread()
-{
- pthread_t th;
-
- slave_background_thread_running= true;
- slave_background_thread_stop= false;
- slave_background_thread_gtid_loaded= false;
- if (mysql_thread_create(key_thread_slave_background,
- &th, &connection_attrib, handle_slave_background,
- NULL))
- {
- sql_print_error("Failed to create thread while initialising slave");
- return 1;
- }
- mysql_mutex_lock(&LOCK_slave_background);
- while (!slave_background_thread_gtid_loaded)
- mysql_cond_wait(&COND_slave_background, &LOCK_slave_background);
- mysql_mutex_unlock(&LOCK_slave_background);
-
- return 0;
-}
-
-
-static void
-stop_slave_background_thread()
-{
- mysql_mutex_lock(&LOCK_slave_background);
- slave_background_thread_stop= true;
- mysql_cond_broadcast(&COND_slave_background);
- while (slave_background_thread_running)
- mysql_cond_wait(&COND_slave_background, &LOCK_slave_background);
- mysql_mutex_unlock(&LOCK_slave_background);
+ mysql_manager_submit(bg_gtid_delete_pending, NULL);
}
@@ -712,12 +561,19 @@ int init_slave()
init_slave_psi_keys();
#endif
- if (start_slave_background_thread())
- return 1;
-
if (global_rpl_thread_pool.init(opt_slave_parallel_threads))
return 1;
+ slave_background_thread_gtid_loaded= false;
+ mysql_manager_submit(bg_rpl_load_gtid_slave_state, NULL);
+
+ // hijacking global_rpl_thread_pool cond here - it's only used once, at startup
+ mysql_mutex_lock(&global_rpl_thread_pool.LOCK_rpl_thread_pool);
+ while (!slave_background_thread_gtid_loaded)
+ mysql_cond_wait(&global_rpl_thread_pool.COND_rpl_thread_pool,
+ &global_rpl_thread_pool.LOCK_rpl_thread_pool);
+ mysql_mutex_unlock(&global_rpl_thread_pool.LOCK_rpl_thread_pool);
+
/*
This is called when mysqld starts. Before client connections are
accepted. However bootstrap may conflict with us if it does START SLAVE.
@@ -1213,12 +1069,8 @@ terminate_slave_thread(THD *thd,
int error __attribute__((unused));
DBUG_PRINT("loop", ("killing slave thread"));
-#ifdef WITH_WSREP
- /* awake_no_mutex() requires LOCK_thd_data to be locked if wsrep
- is enabled */
- if (WSREP(thd)) mysql_mutex_lock(&thd->LOCK_thd_data);
-#endif /* WITH_WSREP */
mysql_mutex_lock(&thd->LOCK_thd_kill);
+ mysql_mutex_lock(&thd->LOCK_thd_data);
#ifndef DONT_USE_THR_ALARM
/*
Error codes from pthread_kill are:
@@ -1231,9 +1083,7 @@ terminate_slave_thread(THD *thd,
thd->awake_no_mutex(NOT_KILLED);
mysql_mutex_unlock(&thd->LOCK_thd_kill);
-#ifdef WITH_WSREP
- if (WSREP(thd)) mysql_mutex_unlock(&thd->LOCK_thd_data);
-#endif /* WITH_WSREP */
+ mysql_mutex_unlock(&thd->LOCK_thd_data);
/*
There is a small chance that slave thread might miss the first
@@ -1446,7 +1296,6 @@ void slave_prepare_for_shutdown()
// It's safe to destruct worker pool now when
// all driver threads are gone.
global_rpl_thread_pool.deactivate();
- stop_slave_background_thread();
}
/*
@@ -1477,8 +1326,6 @@ void end_slave()
active_mi= 0;
mysql_mutex_unlock(&LOCK_active_mi);
- stop_slave_background_thread();
-
global_rpl_thread_pool.destroy();
free_all_rpl_filters();
DBUG_VOID_RETURN;
@@ -4737,10 +4584,7 @@ pthread_handler_t handle_slave_io(void *arg)
goto err;
}
-
-#ifdef WITH_WSREP
thd->variables.wsrep_on= 0;
-#endif
if (DBUG_EVALUATE_IF("failed_slave_start", 1, 0)
|| repl_semisync_slave.slave_start(mi))
{
@@ -5060,8 +4904,11 @@ log space");
err:
// print the current replication position
if (mi->using_gtid == Master_info::USE_GTID_NO)
+ {
sql_print_information("Slave I/O thread exiting, read up to log '%s', "
"position %llu", IO_RPL_LOG_NAME, mi->master_log_pos);
+ sql_print_information("master was %s:%d", mi->host, mi->port);
+ }
else
{
StringBuffer<100> tmp;
@@ -5070,6 +4917,7 @@ err:
"position %llu; GTID position %s",
IO_RPL_LOG_NAME, mi->master_log_pos,
tmp.c_ptr_safe());
+ sql_print_information("master was %s:%d", mi->host, mi->port);
}
repl_semisync_slave.slave_stop(mi);
thd->reset_query();
@@ -5672,6 +5520,7 @@ pthread_handler_t handle_slave_sql(void *arg)
sql_print_information("Slave SQL thread exiting, replication stopped in "
"log '%s' at position %llu%s", RPL_LOG_NAME,
rli->group_master_log_pos, tmp.c_ptr_safe());
+ sql_print_information("master was %s:%d", mi->host, mi->port);
}
#ifdef WITH_WSREP
wsrep_after_command_before_result(thd);
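
The large slave.cc hunk above removes the dedicated slave background thread, together with its hand-rolled kill list, gtid-pos-create list and pending-delete flag, and instead submits each job to the generic manager thread as a (callback, argument) pair via mysql_manager_submit(). The class below is a rough standard-C++ analogue of that "one worker draining submitted callbacks" shape, not the server's sql_manager implementation:

#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <queue>
#include <thread>
#include <utility>

class manager
{
  std::queue<std::pair<void (*)(void *), void *>> jobs;
  std::mutex m;
  std::condition_variable cv;
  bool stop= false;
  std::thread worker{[this] { run(); }};

  void run()
  {
    std::unique_lock<std::mutex> lk(m);
    for (;;)
    {
      cv.wait(lk, [this] { return stop || !jobs.empty(); });
      if (jobs.empty() && stop)
        return;                   // drain remaining jobs before exiting
      auto job= jobs.front();
      jobs.pop();
      lk.unlock();
      job.first(job.second);      // run the callback outside the lock
      lk.lock();
    }
  }

public:
  void submit(void (*fn)(void *), void *arg)
  {
    { std::lock_guard<std::mutex> lk(m); jobs.push({fn, arg}); }
    cv.notify_one();
  }
  ~manager()
  {
    { std::lock_guard<std::mutex> lk(m); stop= true; }
    cv.notify_one();
    worker.join();
  }
};

static void say(void *what) { std::puts((const char *) what); }

int main()
{
  manager mgr;
  mgr.submit(say, (void *) "gtid pending delete");
  mgr.submit(say, (void *) "kill worker for retry");
  return 0;                       // destructor drains the queue, then stops
}
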
diff --git a/sql/sp.cc b/sql/sp.cc
index 0b553ebf7a1..de1a8a04756 100644
--- a/sql/sp.cc
+++ b/sql/sp.cc
@@ -2949,7 +2949,7 @@ Sp_handler::show_create_sp(THD *thd, String *buf,
buf->append(STRING_WITH_LEN(" RETURN "));
else
buf->append(STRING_WITH_LEN(" RETURNS "));
- buf->append(&returns);
+ buf->append(returns.str, returns.length); // Not \0 terminated
}
buf->append('\n');
switch (chistics.daccess) {
diff --git a/sql/spatial.cc b/sql/spatial.cc
index 2b36468e158..bda64c6d420 100644
--- a/sql/spatial.cc
+++ b/sql/spatial.cc
@@ -1,6 +1,6 @@
/*
Copyright (c) 2002, 2013, Oracle and/or its affiliates.
- Copyright (c) 2011, 2013, Monty Program Ab.
+ Copyright (c) 2011, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -605,6 +605,7 @@ Geometry *Geometry::create_from_json(Geometry_buffer *buffer,
if (feature_type_found)
goto handle_geometry_key;
}
+ goto err_return;
}
else
{
@@ -1031,6 +1032,127 @@ const Geometry::Class_info *Gis_point::get_class_info() const
}
+/**
+ Calculate the haversine (great-circle) distance.
+ Takes Point and Multipoint geometries as arguments.
+ The Multipoint geometry must contain a single point only.
+ It is up to the caller to ensure valid input.
+
+ @param g pointer to the Geometry
+ @param r sphere radius
+ @param error pointer set to describe the error when a boundary condition is violated
+
+ @return the calculated distance (non-negative) when there is no error,
+ or a negative value when an error occurred.
+*/
+double Gis_point::calculate_haversine(const Geometry *g,
+ const double sphere_radius,
+ int *error)
+{
+ DBUG_ASSERT(sphere_radius > 0);
+ double x1r, x2r, y1r, y2r;
+
+ // This check is done only for optimization purposes where we know there
+ // will be one and only one point in the Multipoint
+ if (g->get_class_info()->m_type_id == Geometry::wkb_multipoint)
+ {
+ const char point_size= 4 + WKB_HEADER_SIZE + POINT_DATA_SIZE+1; //1 for the type
+ char point_temp[point_size];
+ memset(point_temp+4, Geometry::wkb_point, 1);
+ memcpy(point_temp+5, static_cast<const Gis_multi_point *>(g)->get_data_ptr()+5, 4);
+ memcpy(point_temp+4+WKB_HEADER_SIZE, g->get_data_ptr()+4+WKB_HEADER_SIZE,
+ POINT_DATA_SIZE);
+ point_temp[point_size-1]= '\0';
+ Geometry_buffer gbuff;
+ Geometry *gg= Geometry::construct(&gbuff, point_temp, point_size-1);
+ DBUG_ASSERT(gg);
+ if (static_cast<Gis_point *>(gg)->get_xy_radian(&x2r, &y2r))
+ {
+ DBUG_ASSERT(0);
+ return -1;
+ }
+ }
+ else
+ {
+ if (static_cast<const Gis_point *>(g)->get_xy_radian(&x2r, &y2r))
+ {
+ DBUG_ASSERT(0);
+ return -1;
+ }
+ }
+ if (this->get_xy_radian(&x1r, &y1r))
+ {
+ DBUG_ASSERT(0);
+ return -1;
+ }
+ // Check boundary conditions: longitude[-180,180]
+ if (!((x2r >= -M_PI && x2r <= M_PI) && (x1r >= -M_PI && x1r <= M_PI)))
+ {
+ *error=1;
+ return -1;
+ }
+ // Check boundary conditions: latitude[-90,90]
+ if (!((y2r >= -M_PI/2 && y2r <= M_PI/2) && (y1r >= -M_PI/2 && y1r <= M_PI/2)))
+ {
+ *error=-1;
+ return -1;
+ }
+ double dlat= sin((y2r - y1r)/2)*sin((y2r - y1r)/2);
+ double dlong= sin((x2r - x1r)/2)*sin((x2r - x1r)/2);
+ return 2*sphere_radius*asin((sqrt(dlat + cos(y1r)*cos(y2r)*dlong)));
+}
+
+
+/**
+ Calculate the spherical distance between a Point and a Multipoint geometry.
+ If the Multipoint contains a single point, calculate_haversine() can handle
+ that case directly. Otherwise a new Point geometry has to be constructed.
+
+ @param g pointer to the Geometry
+ @param r sphere radius
+ @param result pointer to the result
+ @param err pointer to the error obtained from calculate_haversine()
+
+ @return state
+ @retval TRUE failed
+ @retval FALSE success
+*/
+int Gis_point::spherical_distance_multipoints(Geometry *g, const double r,
+ double *result, int *err)
+{
+ uint32 num_of_points2;
+ // To find the minimum radius it cannot be greater than Earth radius
+ double res= 6370986.0;
+ double temp_res= 0.0;
+ const uint32 len= 4 + WKB_HEADER_SIZE + POINT_DATA_SIZE + 1;
+ char s[len];
+ g->num_geometries(&num_of_points2);
+ DBUG_ASSERT(num_of_points2 >= 1);
+ if (num_of_points2 == 1)
+ {
+ *result= this->calculate_haversine(g, r, err);
+ return 0;
+ }
+ for (uint32 i=1; i <= num_of_points2; i++)
+ {
+ Geometry_buffer buff_temp;
+ Geometry *temp;
+
+ // First 4 bytes are handled already, make sure to create a Point
+ memset(s + 4, Geometry::wkb_point, 1);
+ memcpy(s + 5, g->get_data_ptr() + 5, 4);
+ memcpy(s + 4 + WKB_HEADER_SIZE, g->get_data_ptr() + 4 + WKB_HEADER_SIZE*i +\
+ POINT_DATA_SIZE*(i-1), POINT_DATA_SIZE);
+ s[len-1]= '\0';
+ temp= Geometry::construct(&buff_temp, s, len);
+ DBUG_ASSERT(temp);
+ temp_res= this->calculate_haversine(temp, r, err);
+ if (res > temp_res)
+ res= temp_res;
+ }
+ *result= res;
+ return 0;
+}
/***************************** LineString *******************************/
uint32 Gis_line_string::get_data_size() const
@@ -2162,6 +2284,81 @@ const Geometry::Class_info *Gis_multi_point::get_class_info() const
}
+/**
+ Calculate the spherical distance between two Multipoint geometries.
+ If a Multipoint contains a single point, calculate_haversine() can handle
+ that case directly. Otherwise new Point geometries have to be constructed.
+
+ @param g pointer to the Geometry
+ @param r sphere radius
+ @param result pointer to the result
+ @param err pointer to the error obtained from calculate_haversine()
+
+ @return state
+ @retval TRUE failed
+ @retval FALSE success
+*/
+int Gis_multi_point::spherical_distance_multipoints(Geometry *g, const double r,
+ double *result, int *err)
+{
+ const uint32 len= 4 + WKB_HEADER_SIZE + POINT_DATA_SIZE + 1;
+ // Check how many points are stored in Multipoints
+ uint32 num_of_points1, num_of_points2;
+ // To find the minimum radius it cannot be greater than Earth radius
+ double res= 6370986.0;
+
+ /* From Item_func_sphere_distance::spherical_distance_points,
+ we are sure that there will be multiple points and we have to construct
+ Point geometry and return the smallest result.
+ */
+ num_geometries(&num_of_points1);
+ DBUG_ASSERT(num_of_points1 >= 1);
+ g->num_geometries(&num_of_points2);
+ DBUG_ASSERT(num_of_points2 >= 1);
+
+ for (uint32 i=1; i <= num_of_points1; i++)
+ {
+ Geometry_buffer buff_temp;
+ Geometry *temp;
+ double temp_res= 0.0;
+ char s[len];
+ // First 4 bytes are handled already, make sure to create a Point
+ memset(s + 4, Geometry::wkb_point, 1);
+ memcpy(s + 5, this->get_data_ptr() + 5, 4);
+ memcpy(s + 4 + WKB_HEADER_SIZE, this->get_data_ptr() + 4 + WKB_HEADER_SIZE*i +\
+ POINT_DATA_SIZE*(i-1), POINT_DATA_SIZE);
+ s[len-1]= '\0';
+ temp= Geometry::construct(&buff_temp, s, len);
+ DBUG_ASSERT(temp);
+ // Optimization for single Multipoint
+ if (num_of_points2 == 1)
+ {
+ *result= static_cast<Gis_point *>(temp)->calculate_haversine(g, r, err);
+ return 0;
+ }
+ for (uint32 j=1; j<= num_of_points2; j++)
+ {
+ Geometry_buffer buff_temp2;
+ Geometry *temp2;
+ char s2[len];
+ // First 4 bytes are handled already, make sure to create a Point
+ memset(s2 + 4, Geometry::wkb_point, 1);
+ memcpy(s2 + 5, g->get_data_ptr() + 5, 4);
+ memcpy(s2 + 4 + WKB_HEADER_SIZE, g->get_data_ptr() + 4 + WKB_HEADER_SIZE*j +\
+ POINT_DATA_SIZE*(j-1), POINT_DATA_SIZE);
+ s2[len-1]= '\0';
+ temp2= Geometry::construct(&buff_temp2, s2, len);
+ DBUG_ASSERT(temp2);
+ temp_res= static_cast<Gis_point *>(temp)->calculate_haversine(temp2, r, err);
+ if (res > temp_res)
+ res= temp_res;
+ }
+ }
+ *result= res;
+ return 0;
+}
+
+
/***************************** MultiLineString *******************************/
uint32 Gis_multi_line_string::get_data_size() const
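
Gis_point::calculate_haversine() above implements the standard haversine great-circle distance after converting coordinates to radians and checking the longitude [-180,180] and latitude [-90,90] bounds. A self-contained numeric sketch of the same formula, with plain doubles instead of Geometry objects (6370986 m is the Earth-radius value that appears in the hunks above):

#include <cmath>
#include <cstdio>

// d = 2*R*asin( sqrt( sin^2((lat2-lat1)/2) + cos(lat1)*cos(lat2)*sin^2((lon2-lon1)/2) ) )
static double haversine_m(double lon1_deg, double lat1_deg,
                          double lon2_deg, double lat2_deg,
                          double radius_m= 6370986.0)
{
  const double deg2rad= std::acos(-1.0) / 180.0;     // pi/180
  double x1= lon1_deg * deg2rad, y1= lat1_deg * deg2rad;
  double x2= lon2_deg * deg2rad, y2= lat2_deg * deg2rad;
  double dlat= std::sin((y2 - y1) / 2), dlon= std::sin((x2 - x1) / 2);
  return 2 * radius_m *
         std::asin(std::sqrt(dlat * dlat + std::cos(y1) * std::cos(y2) * dlon * dlon));
}

int main()
{
  // A quarter of the equator: (lon 0, lat 0) to (lon 90, lat 0);
  // expected pi/2 * R, roughly 10,007 km with the radius above.
  std::printf("%.0f m\n", haversine_m(0, 0, 90, 0));
  return 0;
}
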
diff --git a/sql/spatial.h b/sql/spatial.h
index 55f450b1b1b..0c00482c09b 100644
--- a/sql/spatial.h
+++ b/sql/spatial.h
@@ -332,6 +332,11 @@ public:
m_data+= WKB_HEADER_SIZE;
}
+ const char *get_data_ptr() const
+ {
+ return m_data;
+ }
+
bool envelope(String *result) const;
static Class_info *ci_collection[wkb_last+1];
@@ -410,6 +415,17 @@ public:
return 0;
}
+ int get_xy_radian(double *x, double *y) const
+ {
+ if (!get_xy(x, y))
+ {
+ *x= (*x)*M_PI/180;
+ *y= (*y)*M_PI/180;
+ return 0;
+ }
+ return 1;
+ }
+
int get_x(double *x) const
{
if (no_data(m_data, SIZEOF_STORED_DOUBLE))
@@ -436,6 +452,10 @@ public:
}
int store_shapes(Gcalc_shape_transporter *trn) const;
const Class_info *get_class_info() const;
+ double calculate_haversine(const Geometry *g, const double sphere_radius,
+ int *error);
+ int spherical_distance_multipoints(Geometry *g, const double r, double *result,
+ int *error);
};
@@ -535,6 +555,8 @@ public:
}
int store_shapes(Gcalc_shape_transporter *trn) const;
const Class_info *get_class_info() const;
+ int spherical_distance_multipoints(Geometry *g, const double r, double *res,
+ int *error);
};
diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc
index cb2757ba48f..96f1b87d5d7 100644
--- a/sql/sql_acl.cc
+++ b/sql/sql_acl.cc
@@ -3159,6 +3159,12 @@ end:
int acl_check_setrole(THD *thd, const char *rolename, ulonglong *access)
{
+ if (!initialized)
+ {
+ my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), "--skip-grant-tables");
+ return 1;
+ }
+
return check_user_can_set_role(thd, thd->security_ctx->priv_user,
thd->security_ctx->host, thd->security_ctx->ip, rolename, access);
}
@@ -3788,7 +3794,7 @@ bool change_password(THD *thd, LEX_USER *user)
char buff[512];
ulong query_length= 0;
enum_binlog_format save_binlog_format;
- int result=0;
+ bool result= false, acl_cache_is_locked= false;
ACL_USER *acl_user;
ACL_USER::AUTH auth;
const char *password_plugin= 0;
@@ -3813,7 +3819,7 @@ bool change_password(THD *thd, LEX_USER *user)
if ((result= tables.open_and_lock(thd, Table_user, TL_WRITE)))
DBUG_RETURN(result != 1);
- result= 1;
+ acl_cache_is_locked= 1;
mysql_mutex_lock(&acl_cache->lock);
if (!(acl_user= find_user_exact(user->host.str, user->user.str)))
@@ -3866,7 +3872,7 @@ bool change_password(THD *thd, LEX_USER *user)
acl_cache->clear(1); // Clear locked hostname cache
mysql_mutex_unlock(&acl_cache->lock);
- result= 0;
+ result= acl_cache_is_locked= 0;
if (mysql_bin_log.is_open())
{
query_length= sprintf(buff, "SET PASSWORD FOR '%-.120s'@'%-.120s'='%-.120s'",
@@ -3877,7 +3883,7 @@ bool change_password(THD *thd, LEX_USER *user)
FALSE, FALSE, FALSE, 0) > 0;
}
end:
- if (result)
+ if (acl_cache_is_locked)
mysql_mutex_unlock(&acl_cache->lock);
close_mysql_tables(thd);
@@ -5342,7 +5348,7 @@ routine_hash_search(const char *host, const char *ip, const char *db,
const char *user, const char *tname, const Sp_handler *sph,
bool exact)
{
- return (GRANT_TABLE*)
+ return (GRANT_NAME*)
name_hash_search(sph->get_priv_hash(),
host, ip, db, user, tname, exact, TRUE);
}
@@ -5356,6 +5362,10 @@ table_hash_search(const char *host, const char *ip, const char *db,
user, tname, exact, FALSE);
}
+static bool column_priv_insert(GRANT_TABLE *grant)
+{
+ return my_hash_insert(&column_priv_hash,(uchar*) grant);
+}
static GRANT_COLUMN *
column_hash_search(GRANT_TABLE *t, const char *cname, size_t length)
@@ -5585,6 +5595,15 @@ static inline void get_grantor(THD *thd, char *grantor)
strxmov(grantor, user, "@", host, NullS);
}
+
+/**
+ Revoke rights from a grant table entry.
+
+ @return 0 ok
+ @return 1 fatal error (error given)
+ @return -1 grant table was revoked
+*/
+
static int replace_table_table(THD *thd, GRANT_TABLE *grant_table,
TABLE *table, const LEX_USER &combo,
const char *db, const char *table_name,
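Note (hypothetical names, not the actual call sites): the 0 / 1 / -1 contract documented above is what the callers changed later in this patch rely on. A tiny standalone sketch of consuming such a tri-state result:

    #include <cstdio>
    #include <initializer_list>

    // Hypothetical stand-in for replace_table_table(): 0 = entry updated in
    // place, 1 = fatal error (already reported), -1 = the entry was removed.
    static int replace_entry(int scenario) { return scenario; }

    int main()
    {
      for (int res : {0, 1, -1})
      {
        res= replace_entry(res);
        if (res > 0)
          std::puts("fatal error: abort the statement");
        else if (res < 0)
          std::puts("entry revoked: the hash may have been reorganized, rescan");
        else
          std::puts("entry updated in place");
      }
      return 0;
    }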
@@ -5609,7 +5628,7 @@ static int replace_table_table(THD *thd, GRANT_TABLE *grant_table,
{
my_message(ER_PASSWORD_NO_MATCH, ER_THD(thd, ER_PASSWORD_NO_MATCH),
MYF(0)); /* purecov: deadcode */
- DBUG_RETURN(-1); /* purecov: deadcode */
+ DBUG_RETURN(1); /* purecov: deadcode */
}
}
@@ -5640,7 +5659,7 @@ static int replace_table_table(THD *thd, GRANT_TABLE *grant_table,
my_error(ER_NONEXISTING_TABLE_GRANT, MYF(0),
combo.user.str, combo.host.str,
table_name); /* purecov: deadcode */
- DBUG_RETURN(-1); /* purecov: deadcode */
+ DBUG_RETURN(1); /* purecov: deadcode */
}
old_row_exists = 0;
restore_record(table,record[1]); // Get saved record
@@ -5703,13 +5722,14 @@ static int replace_table_table(THD *thd, GRANT_TABLE *grant_table,
else
{
my_hash_delete(&column_priv_hash,(uchar*) grant_table);
+ DBUG_RETURN(-1); // Entry revoked
}
DBUG_RETURN(0);
/* This should never happen */
table_error:
table->file->print_error(error,MYF(0)); /* purecov: deadcode */
- DBUG_RETURN(-1); /* purecov: deadcode */
+ DBUG_RETURN(1); /* purecov: deadcode */
}
@@ -6470,7 +6490,7 @@ static int update_role_table_columns(GRANT_TABLE *merged,
privs, cols);
merged->init_privs= merged->init_cols= 0;
update_role_columns(merged, first, last);
- my_hash_insert(&column_priv_hash,(uchar*) merged);
+ column_priv_insert(merged);
return 2;
}
else if ((privs | cols) == 0)
@@ -6790,7 +6810,7 @@ int mysql_table_grant(THD *thd, TABLE_LIST *table_list,
bool revoke_grant)
{
ulong column_priv= 0;
- int result;
+ int result, res;
List_iterator <LEX_USER> str_list (user_list);
LEX_USER *Str, *tmp_Str;
bool create_new_users=0;
@@ -6933,12 +6953,12 @@ int mysql_table_grant(THD *thd, TABLE_LIST *table_list,
result= TRUE;
continue;
}
- grant_table = new GRANT_TABLE (Str->host.str, db_name,
- Str->user.str, table_name,
- rights,
- column_priv);
+ grant_table= new (&grant_memroot) GRANT_TABLE(Str->host.str, db_name,
+ Str->user.str, table_name,
+ rights,
+ column_priv);
if (!grant_table ||
- my_hash_insert(&column_priv_hash,(uchar*) grant_table))
+ column_priv_insert(grant_table))
{
result= TRUE; /* purecov: deadcode */
continue; /* purecov: deadcode */
@@ -6981,22 +7001,24 @@ int mysql_table_grant(THD *thd, TABLE_LIST *table_list,
/* TODO(cvicentiu) refactor replace_table_table to use Tables_priv_table
instead of TABLE directly. */
- if (replace_table_table(thd, grant_table, tables.tables_priv_table().table(),
- *Str, db_name, table_name,
- rights, column_priv, revoke_grant))
- {
- /* Should only happen if table is crashed */
- result= TRUE; /* purecov: deadcode */
- }
- else if (tables.columns_priv_table().table_exists())
+ if (tables.columns_priv_table().table_exists())
{
/* TODO(cvicentiu) refactor replace_column_table to use Columns_priv_table
instead of TABLE directly. */
if (replace_column_table(grant_table, tables.columns_priv_table().table(),
*Str, columns, db_name, table_name, rights,
revoke_grant))
- {
result= TRUE;
+ }
+ if ((res= replace_table_table(thd, grant_table,
+ tables.tables_priv_table().table(),
+ *Str, db_name, table_name,
+ rights, column_priv, revoke_grant)))
+ {
+ if (res > 0)
+ {
+ /* Should only happen if table is crashed */
+ result= TRUE; /* purecov: deadcode */
}
}
if (Str->is_role())
@@ -7008,9 +7030,7 @@ int mysql_table_grant(THD *thd, TABLE_LIST *table_list,
mysql_mutex_unlock(&acl_cache->lock);
if (!result) /* success */
- {
result= write_bin_log(thd, TRUE, thd->query(), thd->query_length());
- }
mysql_rwlock_unlock(&LOCK_grant);
@@ -7698,7 +7718,7 @@ static bool grant_load(THD *thd,
if (! mem_check->ok())
delete mem_check;
- else if (my_hash_insert(&column_priv_hash,(uchar*) mem_check))
+ else if (column_priv_insert(mem_check))
{
delete mem_check;
goto end_unlock;
@@ -8896,6 +8916,16 @@ static bool print_grants_for_role(THD *thd, ACL_ROLE * role)
}
+static void append_auto_expiration_policy(ACL_USER *acl_user, String *r) {
+ if (!acl_user->password_lifetime)
+ r->append(STRING_WITH_LEN(" PASSWORD EXPIRE NEVER"));
+ else if (acl_user->password_lifetime > 0)
+ {
+ r->append(STRING_WITH_LEN(" PASSWORD EXPIRE INTERVAL "));
+ r->append_longlong(acl_user->password_lifetime);
+ r->append(STRING_WITH_LEN(" DAY"));
+ }
+}
bool mysql_show_create_user(THD *thd, LEX_USER *lex_user)
{
@@ -8955,14 +8985,8 @@ bool mysql_show_create_user(THD *thd, LEX_USER *lex_user)
if (acl_user->password_expired)
result.append(STRING_WITH_LEN(" PASSWORD EXPIRE"));
- else if (!acl_user->password_lifetime)
- result.append(STRING_WITH_LEN(" PASSWORD EXPIRE NEVER"));
- else if (acl_user->password_lifetime > 0)
- {
- result.append(STRING_WITH_LEN(" PASSWORD EXPIRE INTERVAL "));
- result.append_longlong(acl_user->password_lifetime);
- result.append(STRING_WITH_LEN(" DAY"));
- }
+ else
+ append_auto_expiration_policy(acl_user, &result);
protocol->prepare_for_resend();
protocol->store(result.ptr(), result.length(), result.charset());
@@ -8970,6 +8994,28 @@ bool mysql_show_create_user(THD *thd, LEX_USER *lex_user)
{
error= true;
}
+
+ /* MDEV-24114 - PASSWORD EXPIRE and PASSWORD EXPIRE [NEVER | INTERVAL X DAY]
+ are two different mechanisms. To make sure a tool can restore the state
+ of a user account, including both the manual expiration state of the
+ account and the automatic expiration policy attached to it, we should
+ print two statements here, a CREATE USER (printed above) and an ALTER USER */
+ if (acl_user->password_expired && acl_user->password_lifetime > -1) {
+ result.length(0);
+ result.append("ALTER USER ");
+ append_identifier(thd, &result, username, strlen(username));
+ result.append('@');
+ append_identifier(thd, &result, acl_user->host.hostname,
+ acl_user->hostname_length);
+ append_auto_expiration_policy(acl_user, &result);
+ protocol->prepare_for_resend();
+ protocol->store(result.ptr(), result.length(), result.charset());
+ if (protocol->write())
+ {
+ error= true;
+ }
+ }
+
my_eof(thd);
end:
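Note (illustration only, hypothetical account and values): with the change above, SHOW CREATE USER for an account that is both manually expired and has, say, a 30-day policy now returns two statements, e.g. CREATE USER 'u'@'%' ... PASSWORD EXPIRE followed by ALTER USER 'u'@'%' PASSWORD EXPIRE INTERVAL 30 DAY, so a dump/restore tool can reproduce both the manual expiration state and the automatic policy.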
@@ -11068,7 +11114,7 @@ mysql_revoke_sp_privs(THD *thd, Grant_tables *tables, const Sp_handler *sph,
bool mysql_revoke_all(THD *thd, List <LEX_USER> &list)
{
uint counter, revoked;
- int result;
+ int result, res;
ACL_DB *acl_db;
DBUG_ENTER("mysql_revoke_all");
@@ -11161,36 +11207,35 @@ bool mysql_revoke_all(THD *thd, List <LEX_USER> &list)
if (!strcmp(lex_user->user.str,user) &&
!strcmp(lex_user->host.str, host))
{
- /* TODO(cvicentiu) refactor replace_db_table to use
- Db_table instead of TABLE directly. */
- if (replace_table_table(thd, grant_table,
- tables.tables_priv_table().table(),
- *lex_user, grant_table->db,
- grant_table->tname, ~(ulong)0, 0, 1))
- {
+ List<LEX_COLUMN> columns;
+ /* TODO(cvicentiu) refactor replace_db_table to use
+ Db_table instead of TABLE directly. */
+ if (replace_column_table(grant_table,
+ tables.columns_priv_table().table(),
+ *lex_user, columns, grant_table->db,
+ grant_table->tname, ~(ulong)0, 1))
result= -1;
- }
- else
+
+ /* TODO(cvicentiu) refactor replace_db_table to use
+ Db_table instead of TABLE directly. */
+ if ((res= replace_table_table(thd, grant_table,
+ tables.tables_priv_table().table(),
+ *lex_user, grant_table->db,
+ grant_table->tname, ~(ulong)0, 0, 1)))
{
- if (!grant_table->cols)
- {
- revoked= 1;
- continue;
- }
- List<LEX_COLUMN> columns;
- /* TODO(cvicentiu) refactor replace_db_table to use
- Db_table instead of TABLE directly. */
- if (!replace_column_table(grant_table,
- tables.columns_priv_table().table(),
- *lex_user, columns, grant_table->db,
- grant_table->tname, ~(ulong)0, 1))
- {
- revoked= 1;
- continue;
- }
- result= -1;
- }
- }
+ if (res > 0)
+ result= -1;
+ else
+ {
+ /*
+ Entry was deleted. We have to retry the loop as the
+ hash table has probably been reorganized.
+ */
+ revoked= 1;
+ continue;
+ }
+ }
+ }
counter++;
}
} while (revoked);
diff --git a/sql/sql_admin.cc b/sql/sql_admin.cc
index 0a6e76d117f..a96eb58809b 100644
--- a/sql/sql_admin.cc
+++ b/sql/sql_admin.cc
@@ -447,8 +447,6 @@ dbug_err:
*/
static bool wsrep_toi_replication(THD *thd, TABLE_LIST *tables)
{
- if (!WSREP(thd) || !WSREP_CLIENT(thd)) return false;
-
LEX *lex= thd->lex;
/* only handle OPTIMIZE and REPAIR here */
switch (lex->sql_command)
@@ -549,10 +547,13 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
for (table= tables; table; table= table->next_local)
table->table= NULL;
#ifdef WITH_WSREP
- if (wsrep_toi_replication(thd, tables))
+ if (WSREP(thd))
{
- WSREP_INFO("wsrep TOI replication of has failed, skipping OPTIMIZE");
- goto err;
+ if (wsrep_toi_replication(thd, tables))
+ {
+ WSREP_INFO("wsrep TOI replication has failed.");
+ goto err;
+ }
}
#endif /* WITH_WSREP */
@@ -1396,7 +1397,9 @@ bool Sql_cmd_analyze_table::execute(THD *thd)
/*
Presumably, ANALYZE and binlog writing doesn't require synchronization
*/
+ thd->get_stmt_da()->set_overwrite_status(true);
res= write_bin_log(thd, TRUE, thd->query(), thd->query_length());
+ thd->get_stmt_da()->set_overwrite_status(false);
}
m_lex->first_select_lex()->table_list.first= first_table;
m_lex->query_tables= first_table;
@@ -1455,7 +1458,9 @@ bool Sql_cmd_optimize_table::execute(THD *thd)
/*
Presumably, OPTIMIZE and binlog writing doesn't require synchronization
*/
+ thd->get_stmt_da()->set_overwrite_status(true);
res= write_bin_log(thd, TRUE, thd->query(), thd->query_length());
+ thd->get_stmt_da()->set_overwrite_status(false);
}
m_lex->first_select_lex()->table_list.first= first_table;
m_lex->query_tables= first_table;
@@ -1487,7 +1492,9 @@ bool Sql_cmd_repair_table::execute(THD *thd)
/*
Presumably, REPAIR and binlog writing doesn't require synchronization
*/
+ thd->get_stmt_da()->set_overwrite_status(true);
res= write_bin_log(thd, TRUE, thd->query(), thd->query_length());
+ thd->get_stmt_da()->set_overwrite_status(false);
}
m_lex->first_select_lex()->table_list.first= first_table;
m_lex->query_tables= first_table;
diff --git a/sql/sql_alter.cc b/sql/sql_alter.cc
index 2bbc8169df2..f1a67e7d968 100644
--- a/sql/sql_alter.cc
+++ b/sql/sql_alter.cc
@@ -471,7 +471,7 @@ bool Sql_cmd_alter_table::execute(THD *thd)
if (check_grant(thd, priv_needed, first_table, FALSE, UINT_MAX, FALSE))
DBUG_RETURN(TRUE); /* purecov: inspected */
#ifdef WITH_WSREP
- if (WSREP(thd) && WSREP_CLIENT(thd) &&
+ if (WSREP(thd) &&
(!thd->is_current_stmt_binlog_format_row() ||
!thd->find_temporary_table(first_table)))
{
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index 868d76572e3..cc6ecda9327 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -4693,7 +4693,72 @@ add_internal_tables(THD *thd, Query_tables_list *prelocking_ctx,
DBUG_RETURN(FALSE);
}
+/**
+ Extend the table_list to include foreign tables for prelocking.
+
+ @param[in] thd Thread context.
+ @param[in] prelocking_ctx Prelocking context of the statement.
+ @param[in] table_list Table list element for table.
+ @param[in] sp Routine body.
+ @param[out] need_prelocking Set to TRUE if the method detects that prelocking
+ is required; not changed otherwise.
+
+ @retval FALSE Success.
+ @retval TRUE Failure (OOM).
+*/
+inline bool
+prepare_fk_prelocking_list(THD *thd, Query_tables_list *prelocking_ctx,
+ TABLE_LIST *table_list, bool *need_prelocking,
+ uint8 op)
+{
+ DBUG_ENTER("prepare_fk_prelocking_list");
+ List <FOREIGN_KEY_INFO> fk_list;
+ List_iterator<FOREIGN_KEY_INFO> fk_list_it(fk_list);
+ FOREIGN_KEY_INFO *fk;
+ Query_arena *arena, backup;
+ TABLE *table= table_list->table;
+
+ arena= thd->activate_stmt_arena_if_needed(&backup);
+ table->file->get_parent_foreign_key_list(thd, &fk_list);
+ if (unlikely(thd->is_error()))
+ {
+ if (arena)
+ thd->restore_active_arena(arena, &backup);
+ return TRUE;
+ }
+
+ *need_prelocking= TRUE;
+
+ while ((fk= fk_list_it++))
+ {
+ // FK_OPTION_RESTRICT and FK_OPTION_NO_ACTION only need read access
+ thr_lock_type lock_type;
+
+ if ((op & (1 << TRG_EVENT_DELETE) && fk_modifies_child(fk->delete_method))
+ || (op & (1 << TRG_EVENT_UPDATE) && fk_modifies_child(fk->update_method)))
+ lock_type= TL_WRITE_ALLOW_WRITE;
+ else
+ lock_type= TL_READ;
+
+ if (table_already_fk_prelocked(prelocking_ctx->query_tables,
+ fk->foreign_db, fk->foreign_table,
+ lock_type))
+ continue;
+
+ TABLE_LIST *tl= (TABLE_LIST *) thd->alloc(sizeof(TABLE_LIST));
+ tl->init_one_table_for_prelocking(fk->foreign_db,
+ fk->foreign_table,
+ NULL, lock_type,
+ TABLE_LIST::PRELOCK_FK,
+ table_list->belong_to_view, op,
+ &prelocking_ctx->query_tables_last,
+ table_list->for_insert_data);
+ }
+ if (arena)
+ thd->restore_active_arena(arena, &backup);
+ DBUG_RETURN(FALSE);
+}
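Note (illustrative enum and names only, not the server's FOREIGN_KEY_INFO API): as the comment inside the new helper says, only foreign-key actions that modify the child table need a write lock during prelocking, while RESTRICT/NO ACTION referencing tables are only read. A standalone sketch of that decision:

    #include <cstdio>

    enum fk_action { FK_RESTRICT, FK_NO_ACTION, FK_CASCADE, FK_SET_NULL };

    // Actions that change rows in the referencing (child) table need a
    // write lock on it while prelocking; the others only need a read lock.
    static bool modifies_child(fk_action a)
    { return a == FK_CASCADE || a == FK_SET_NULL; }

    int main()
    {
      const char *names[]= {"RESTRICT", "NO ACTION", "CASCADE", "SET NULL"};
      for (int a= FK_RESTRICT; a <= FK_SET_NULL; a++)
        std::printf("%-9s -> %s lock\n", names[a],
                    modifies_child(static_cast<fk_action>(a)) ? "write" : "read");
      return 0;
    }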
/**
Defines how prelocking algorithm for DML statements should handle table list
@@ -4740,53 +4805,20 @@ handle_table(THD *thd, Query_tables_list *prelocking_ctx,
if (table->file->referenced_by_foreign_key())
{
- List <FOREIGN_KEY_INFO> fk_list;
- List_iterator<FOREIGN_KEY_INFO> fk_list_it(fk_list);
- FOREIGN_KEY_INFO *fk;
- Query_arena *arena, backup;
-
- arena= thd->activate_stmt_arena_if_needed(&backup);
-
- table->file->get_parent_foreign_key_list(thd, &fk_list);
- if (unlikely(thd->is_error()))
- {
- if (arena)
- thd->restore_active_arena(arena, &backup);
- DBUG_RETURN(TRUE);
- }
-
- *need_prelocking= TRUE;
-
- while ((fk= fk_list_it++))
- {
- // FK_OPTION_RESTRICT and FK_OPTION_NO_ACTION only need read access
- uint8 op= table_list->trg_event_map;
- thr_lock_type lock_type;
-
- if ((op & (1 << TRG_EVENT_DELETE) && fk_modifies_child(fk->delete_method))
- || (op & (1 << TRG_EVENT_UPDATE) && fk_modifies_child(fk->update_method)))
- lock_type= TL_WRITE_ALLOW_WRITE;
- else
- lock_type= TL_READ;
-
- if (table_already_fk_prelocked(prelocking_ctx->query_tables,
- fk->foreign_db, fk->foreign_table,
- lock_type))
- continue;
-
- TABLE_LIST *tl= (TABLE_LIST *) thd->alloc(sizeof(TABLE_LIST));
- tl->init_one_table_for_prelocking(fk->foreign_db,
- fk->foreign_table,
- NULL, lock_type,
- TABLE_LIST::PRELOCK_FK,
- table_list->belong_to_view, op,
- &prelocking_ctx->query_tables_last,
- table_list->for_insert_data);
- }
- if (arena)
- thd->restore_active_arena(arena, &backup);
+ if (prepare_fk_prelocking_list(thd, prelocking_ctx, table_list,
+ need_prelocking,
+ table_list->trg_event_map))
+ return TRUE;
}
}
+ else if (table_list->slave_fk_event_map &&
+ table->file->referenced_by_foreign_key())
+ {
+ if (prepare_fk_prelocking_list(thd, prelocking_ctx, table_list,
+ need_prelocking,
+ table_list->slave_fk_event_map))
+ return TRUE;
+ }
/* Open any tables used by DEFAULT (like sequence tables) */
DBUG_PRINT("info", ("table: %p name: %s db: %s flags: %u",
@@ -6231,6 +6263,7 @@ find_field_in_table_ref(THD *thd, TABLE_LIST *table_list,
#ifndef NO_EMBEDDED_ACCESS_CHECKS
/* Check if there are sufficient access rights to the found field. */
if (check_privileges &&
+ !table_list->is_derived() &&
check_column_grant_in_table_ref(thd, *actual_table, name, length, fld))
fld= WRONG_GRANT;
else
@@ -6453,7 +6486,7 @@ find_field_in_tables(THD *thd, Item_ident *item,
if (!all_merged && current_sel != last_select)
{
mark_select_range_as_dependent(thd, last_select, current_sel,
- found, *ref, item);
+ found, *ref, item, true);
}
}
return found;
@@ -7865,11 +7898,15 @@ bool setup_tables(THD *thd, Name_resolution_context *context,
DBUG_RETURN(1);
}
tablenr++;
- }
- if (tablenr > MAX_TABLES)
- {
- my_error(ER_TOO_MANY_TABLES,MYF(0), static_cast<int>(MAX_TABLES));
- DBUG_RETURN(1);
+ /*
+ We test the max tables here as setup_table_map() should not be called
+ with tablenr >= 64
+ */
+ if (tablenr > MAX_TABLES)
+ {
+ my_error(ER_TOO_MANY_TABLES,MYF(0), static_cast<int>(MAX_TABLES));
+ DBUG_RETURN(1);
+ }
}
}
else
@@ -7915,7 +7952,8 @@ bool setup_tables(THD *thd, Name_resolution_context *context,
if (table_list->jtbm_subselect)
{
Item *item= table_list->jtbm_subselect->optimizer;
- if (table_list->jtbm_subselect->optimizer->fix_fields(thd, &item))
+ if (!table_list->jtbm_subselect->optimizer->fixed &&
+ table_list->jtbm_subselect->optimizer->fix_fields(thd, &item))
{
my_error(ER_TOO_MANY_TABLES,MYF(0), static_cast<int>(MAX_TABLES)); /* psergey-todo: WHY ER_TOO_MANY_TABLES ???*/
DBUG_RETURN(1);
@@ -8092,36 +8130,23 @@ insert_fields(THD *thd, Name_resolution_context *context, const char *db_name,
#ifndef NO_EMBEDDED_ACCESS_CHECKS
/*
- Ensure that we have access rights to all fields to be inserted. Under
- some circumstances, this check may be skipped.
-
- - If any_privileges is true, skip the check.
+ Ensure that we have access rights to all fields to be inserted
+ into the table 'tables'. Under some circumstances, this check may be skipped.
- - If the SELECT privilege has been found as fulfilled already for both
- the TABLE and TABLE_LIST objects (and both of these exist, of
- course), the check is skipped.
+ The check is skipped in the following cases:
- - If the SELECT privilege has been found fulfilled for the TABLE object
- and the TABLE_LIST represents a derived table other than a view (see
- below), the check is skipped.
+ - any_privileges is true
- - If the TABLE_LIST object represents a view, we may skip checking if
- the SELECT privilege has been found fulfilled for it, regardless of
- the TABLE object.
+ - the table is a derived table
- - If there is no TABLE object, the test is skipped if either
- * the TABLE_LIST does not represent a view, or
- * the SELECT privilege has been found fulfilled.
+ - the table is a view with SELECT privilege
- A TABLE_LIST that is not a view may be a subquery, an
- information_schema table, or a nested table reference. See the comment
- for TABLE_LIST.
+ - the table is a base table with SELECT privilege
*/
- if (!((table && tables->is_non_derived() &&
- (table->grant.privilege & SELECT_ACL)) ||
- ((!tables->is_non_derived() &&
- (tables->grant.privilege & SELECT_ACL)))) &&
- !any_privileges)
+ if (!any_privileges &&
+ !tables->is_derived() &&
+ !(tables->is_view() && (tables->grant.privilege & SELECT_ACL)) &&
+ !(table && (table->grant.privilege & SELECT_ACL)))
{
field_iterator.set(tables);
if (check_grant_all_columns(thd, SELECT_ACL, &field_iterator))
diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc
index 0ba4732b6cd..bef3318f974 100644
--- a/sql/sql_cache.cc
+++ b/sql/sql_cache.cc
@@ -1449,7 +1449,7 @@ void Query_cache::store_query(THD *thd, TABLE_LIST *tables_used)
DBUG_PRINT("qcache", ("\
long %d, 4.1: %d, eof: %d, bin_proto: %d, more results %d, pkt_nr: %d, \
CS client: %u, CS result: %u, CS conn: %u, limit: %llu, TZ: %p, \
-sql mode: 0x%llx, sort len: %llu, conncat len: %llu, div_precision: %zu, \
+sql mode: 0x%llx, sort len: %llu, concat len: %u, div_precision: %zu, \
def_week_frmt: %zu, in_trans: %d, autocommit: %d",
(int)flags.client_long_flag,
(int)flags.client_protocol_41,
@@ -1949,7 +1949,7 @@ Query_cache::send_result_to_client(THD *thd, char *org_sql, uint query_length)
DBUG_PRINT("qcache", ("\
long %d, 4.1: %d, eof: %d, bin_proto: %d, more results %d, pkt_nr: %d, \
CS client: %u, CS result: %u, CS conn: %u, limit: %llu, TZ: %p, \
-sql mode: 0x%llx, sort len: %llu, conncat len: %llu, div_precision: %zu, \
+sql mode: 0x%llx, sort len: %llu, concat len: %u, div_precision: %zu, \
def_week_frmt: %zu, in_trans: %d, autocommit: %d",
(int)flags.client_long_flag,
(int)flags.client_protocol_41,
diff --git a/sql/sql_cache.h b/sql/sql_cache.h
index 92635ecacc7..d59bc37b7a3 100644
--- a/sql/sql_cache.h
+++ b/sql/sql_cache.h
@@ -558,11 +558,11 @@ struct Query_cache_query_flags
uint character_set_client_num;
uint character_set_results_num;
uint collation_connection_num;
+ uint group_concat_max_len;
ha_rows limit;
Time_zone *time_zone;
sql_mode_t sql_mode;
ulonglong max_sort_length;
- ulonglong group_concat_max_len;
size_t default_week_format;
size_t div_precision_increment;
MY_LOCALE *lc_time_names;
diff --git a/sql/sql_class.cc b/sql/sql_class.cc
index ceb8dc1ade8..093a94f44f8 100644
--- a/sql/sql_class.cc
+++ b/sql/sql_class.cc
@@ -1,6 +1,6 @@
/*
Copyright (c) 2000, 2015, Oracle and/or its affiliates.
- Copyright (c) 2008, 2020, MariaDB Corporation.
+ Copyright (c) 2008, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -49,9 +49,6 @@
#include <m_ctype.h>
#include <sys/stat.h>
#include <thr_alarm.h>
-#ifdef __WIN__0
-#include <io.h>
-#endif
#include <mysys_err.h>
#include <limits.h>
@@ -70,6 +67,8 @@
#ifdef WITH_WSREP
#include "wsrep_thd.h"
#include "wsrep_trans_observer.h"
+#else
+static inline bool wsrep_is_bf_aborted(THD* thd) { return false; }
#endif /* WITH_WSREP */
#include "opt_trace.h"
@@ -450,6 +449,7 @@ void thd_set_ha_data(THD *thd, const struct handlerton *hton,
const void *ha_data)
{
plugin_ref *lock= &thd->ha_data[hton->slot].lock;
+ DBUG_ASSERT(thd == current_thd);
if (ha_data && !*lock)
*lock= ha_lock_engine(NULL, (handlerton*) hton);
else if (!ha_data && *lock)
@@ -457,7 +457,9 @@ void thd_set_ha_data(THD *thd, const struct handlerton *hton,
plugin_unlock(NULL, *lock);
*lock= NULL;
}
+ mysql_mutex_lock(&thd->LOCK_thd_data);
*thd_ha_data(thd, hton)= (void*) ha_data;
+ mysql_mutex_unlock(&thd->LOCK_thd_data);
}
@@ -782,7 +784,7 @@ THD::THD(my_thread_id id, bool is_wsrep_applier)
net.reading_or_writing= 0;
client_capabilities= 0; // minimalistic client
system_thread= NON_SYSTEM_THREAD;
- cleanup_done= free_connection_done= abort_on_warning= 0;
+ cleanup_done= free_connection_done= abort_on_warning= got_warning= 0;
peer_port= 0; // For SHOW PROCESSLIST
transaction.m_pending_rows_event= 0;
transaction.on= 1;
@@ -797,6 +799,7 @@ THD::THD(my_thread_id id, bool is_wsrep_applier)
mysql_mutex_init(key_LOCK_wakeup_ready, &LOCK_wakeup_ready, MY_MUTEX_INIT_FAST);
mysql_mutex_init(key_LOCK_thd_kill, &LOCK_thd_kill, MY_MUTEX_INIT_FAST);
mysql_cond_init(key_COND_wakeup_ready, &COND_wakeup_ready, 0);
+ mysql_mutex_record_order(&LOCK_thd_kill, &LOCK_thd_data);
/* Variables with default values */
proc_info="login";
@@ -1520,9 +1523,7 @@ void THD::cleanup(void)
set_killed(KILL_CONNECTION);
#ifdef WITH_WSREP
if (wsrep_cs().state() != wsrep::client_state::s_none)
- {
wsrep_cs().cleanup();
- }
wsrep_client_thread= false;
#endif /* WITH_WSREP */
@@ -1598,12 +1599,12 @@ void THD::cleanup(void)
void THD::free_connection()
{
DBUG_ASSERT(free_connection_done == 0);
- my_free((char*) db.str);
+ my_free(const_cast<char*>(db.str));
db= null_clex_str;
#ifndef EMBEDDED_LIBRARY
if (net.vio)
vio_delete(net.vio);
- net.vio= 0;
+ net.vio= nullptr;
net_end(&net);
#endif
if (!cleanup_done)
@@ -1676,19 +1677,16 @@ THD::~THD()
THD is not deleted while they access it. The following mutex_lock
ensures that no one else is using this THD and it's now safe to delete
*/
- if (WSREP_NNULL(this)) mysql_mutex_lock(&LOCK_thd_data);
mysql_mutex_lock(&LOCK_thd_kill);
mysql_mutex_unlock(&LOCK_thd_kill);
- if (WSREP_NNULL(this)) mysql_mutex_unlock(&LOCK_thd_data);
+#ifdef WITH_WSREP
+ delete wsrep_rgi;
+#endif
if (!free_connection_done)
free_connection();
#ifdef WITH_WSREP
- if (wsrep_rgi != NULL) {
- delete wsrep_rgi;
- wsrep_rgi = NULL;
- }
mysql_cond_destroy(&COND_wsrep_thd);
#endif
mdl_context.destroy();
@@ -1871,7 +1869,7 @@ void THD::awake_no_mutex(killed_state state_to_set)
DBUG_PRINT("enter", ("this: %p current_thd: %p state: %d",
this, current_thd, (int) state_to_set));
THD_CHECK_SENTRY(this);
- if (WSREP_NNULL(this)) mysql_mutex_assert_owner(&LOCK_thd_data);
+ mysql_mutex_assert_owner(&LOCK_thd_data);
mysql_mutex_assert_owner(&LOCK_thd_kill);
print_aborted_warning(3, "KILLED");
@@ -1904,15 +1902,21 @@ void THD::awake_no_mutex(killed_state state_to_set)
}
/* Interrupt target waiting inside a storage engine. */
- if (IF_WSREP(state_to_set != NOT_KILLED && !wsrep_is_bf_aborted(this),
- state_to_set != NOT_KILLED))
+ if (state_to_set != NOT_KILLED && !wsrep_is_bf_aborted(this))
ha_kill_query(this, thd_kill_level(this));
- /* Broadcast a condition to kick the target if it is waiting on it. */
+ abort_current_cond_wait(false);
+ DBUG_VOID_RETURN;
+}
+
+/* Broadcast a condition to kick the target if it is waiting on it. */
+void THD::abort_current_cond_wait(bool force)
+{
+ mysql_mutex_assert_owner(&LOCK_thd_kill);
if (mysys_var)
{
mysql_mutex_lock(&mysys_var->mutex);
- if (!system_thread) // Don't abort locks
+ if (!system_thread || force) // Don't abort locks
mysys_var->abort=1;
/*
@@ -1970,7 +1974,6 @@ void THD::awake_no_mutex(killed_state state_to_set)
}
mysql_mutex_unlock(&mysys_var->mutex);
}
- DBUG_VOID_RETURN;
}
@@ -2024,16 +2027,7 @@ bool THD::notify_shared_lock(MDL_context_owner *ctx_in_use,
mysql_mutex_lock(&in_use->LOCK_thd_kill);
if (in_use->killed < KILL_CONNECTION)
in_use->set_killed_no_mutex(KILL_CONNECTION);
- if (in_use->mysys_var)
- {
- mysql_mutex_lock(&in_use->mysys_var->mutex);
- if (in_use->mysys_var->current_cond)
- mysql_cond_broadcast(in_use->mysys_var->current_cond);
-
- /* Abort if about to wait in thr_upgrade_write_delay_lock */
- in_use->mysys_var->abort= 1;
- mysql_mutex_unlock(&in_use->mysys_var->mutex);
- }
+ in_use->abort_current_cond_wait(true);
mysql_mutex_unlock(&in_use->LOCK_thd_kill);
signalled= TRUE;
}
@@ -3142,12 +3136,12 @@ static File create_file(THD *thd, char *path, sql_exchange *exchange,
}
/* Create the file world readable */
if ((file= mysql_file_create(key_select_to_file,
- path, 0666, O_WRONLY|O_EXCL, MYF(MY_WME))) < 0)
+ path, 0644, O_WRONLY|O_EXCL, MYF(MY_WME))) < 0)
return file;
#ifdef HAVE_FCHMOD
- (void) fchmod(file, 0666); // Because of umask()
+ (void) fchmod(file, 0644); // Because of umask()
#else
- (void) chmod(path, 0666);
+ (void) chmod(path, 0644);
#endif
if (init_io_cache(cache, file, 0L, WRITE_CACHE, 0L, 1, MYF(MY_WME)))
{
@@ -5062,6 +5056,18 @@ thd_need_ordering_with(const MYSQL_THD thd, const MYSQL_THD other_thd)
DBUG_EXECUTE_IF("disable_thd_need_ordering_with", return 1;);
if (!thd || !other_thd)
return 1;
+#ifdef WITH_WSREP
+ /* wsrep applier, replayer and TOI processing threads are ordered
+ by replication provider, relaxed GAP locking protocol can be used
+ between high priority wsrep threads.
+ Note that wsrep_thd_is_BF() doesn't take LOCK_thd_data for either thd,
+ the caller should guarantee that the BF state won't change.
+ (e.g. InnoDB does it by keeping lock_sys.mutex locked)
+ */
+ if (WSREP_ON && wsrep_thd_is_BF(thd, false) &&
+ wsrep_thd_is_BF(other_thd, false))
+ return 0;
+#endif /* WITH_WSREP */
rgi= thd->rgi_slave;
other_rgi= other_thd->rgi_slave;
if (!rgi || !other_rgi)
diff --git a/sql/sql_class.h b/sql/sql_class.h
index 6b3c1594e60..d33cd1b35a4 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -592,7 +592,6 @@ typedef struct system_variables
ulonglong bulk_insert_buff_size;
ulonglong join_buff_size;
ulonglong sortbuff_size;
- ulonglong group_concat_max_len;
ulonglong default_regex_flags;
ulonglong max_mem_used;
@@ -685,6 +684,8 @@ typedef struct system_variables
uint32 gtid_domain_id;
uint64 gtid_seq_no;
+ uint group_concat_max_len;
+
/**
Default transaction access mode. READ ONLY (true) or READ WRITE (false).
*/
@@ -932,11 +933,24 @@ void add_to_status(STATUS_VAR *to_var, STATUS_VAR *from_var);
void add_diff_to_status(STATUS_VAR *to_var, STATUS_VAR *from_var,
STATUS_VAR *dec_var);
+uint calc_sum_of_all_status(STATUS_VAR *to);
+static inline void calc_sum_of_all_status_if_needed(STATUS_VAR *to)
+{
+ if (to->local_memory_used == 0)
+ {
+ mysql_mutex_lock(&LOCK_status);
+ *to= global_status_var;
+ mysql_mutex_unlock(&LOCK_status);
+ calc_sum_of_all_status(to);
+ DBUG_ASSERT(to->local_memory_used);
+ }
+}
+
/*
Update global_memory_used. We have to do this with atomic_add as the
global value can change outside of LOCK_status.
*/
-inline void update_global_memory_status(int64 size)
+static inline void update_global_memory_status(int64 size)
{
DBUG_PRINT("info", ("global memory_used: %lld size: %lld",
(longlong) global_status_var.global_memory_used,
@@ -954,7 +968,7 @@ inline void update_global_memory_status(int64 size)
@retval NULL on error
@retval Pointter to CHARSET_INFO with the given name on success
*/
-inline CHARSET_INFO *
+static inline CHARSET_INFO *
mysqld_collation_get_by_name(const char *name,
CHARSET_INFO *name_cs= system_charset_info)
{
@@ -973,7 +987,7 @@ mysqld_collation_get_by_name(const char *name,
return cs;
}
-inline bool is_supported_parser_charset(CHARSET_INFO *cs)
+static inline bool is_supported_parser_charset(CHARSET_INFO *cs)
{
return MY_TEST(cs->mbminlen == 1);
}
@@ -2267,7 +2281,7 @@ public:
- mysys_var (used by KILL statement and shutdown).
- Also ensures that THD is not deleted while mutex is hold
*/
- mysql_mutex_t LOCK_thd_kill;
+ mutable mysql_mutex_t LOCK_thd_kill;
/* all prepared statements and cursors of this connection */
Statement_map stmt_map;
@@ -3308,19 +3322,13 @@ public:
void awake_no_mutex(killed_state state_to_set);
void awake(killed_state state_to_set)
{
- bool wsrep_on_local= variables.wsrep_on;
- /*
- mutex locking order (LOCK_thd_data - LOCK_thd_kill)) requires
- to grab LOCK_thd_data here
- */
- if (wsrep_on_local)
- mysql_mutex_lock(&LOCK_thd_data);
mysql_mutex_lock(&LOCK_thd_kill);
+ mysql_mutex_lock(&LOCK_thd_data);
awake_no_mutex(state_to_set);
+ mysql_mutex_unlock(&LOCK_thd_data);
mysql_mutex_unlock(&LOCK_thd_kill);
- if (wsrep_on_local)
- mysql_mutex_unlock(&LOCK_thd_data);
}
+ void abort_current_cond_wait(bool force);
/** Disconnect the associated communication endpoint. */
void disconnect();
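Note (std::mutex stand-ins, not the server's instrumented mutexes): the rewritten awake() above always takes LOCK_thd_kill before LOCK_thd_data, matching the mysql_mutex_record_order() call added to the THD constructor. A minimal standalone illustration of the single-agreed-order idea:

    #include <mutex>

    // Two mutexes that several threads may need together. Lock-order
    // deadlocks are avoided by always acquiring them in one fixed order:
    // the "kill" lock first, then the "data" lock.
    static std::mutex lock_kill, lock_data;

    static void awake_like_operation()
    {
      std::lock_guard<std::mutex> g1(lock_kill);   // always first
      std::lock_guard<std::mutex> g2(lock_data);   // always second
      // ... inspect or signal the target thread here ...
    }

    int main()
    {
      awake_like_operation();
      return 0;
    }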
@@ -4060,8 +4068,7 @@ public:
mysql_mutex_lock(&LOCK_thd_kill);
int err= killed_errno();
if (err)
- my_message(err, killed_err ? killed_err->msg : ER_THD(this, err),
- MYF(0));
+ my_message(err, killed_err ? killed_err->msg : ER_THD(this, err), MYF(0));
mysql_mutex_unlock(&LOCK_thd_kill);
}
/* return TRUE if we will abort query if we make a warning now */
@@ -5787,10 +5794,15 @@ class select_union_recursive :public select_unit
public:
/* The temporary table with the new records generated by one iterative step */
TABLE *incr_table;
+ /* The TMP_TABLE_PARAM structure used to create incr_table */
+ TMP_TABLE_PARAM incr_table_param;
/* One of tables from the list rec_tables (determined dynamically) */
TABLE *first_rec_table_to_update;
- /* The temporary tables used for recursive table references */
- List<TABLE> rec_tables;
+ /*
+ The list of all recursive table references to the CTE for whose
+ specification this select_union_recursive was created
+ */
+ List<TABLE_LIST> rec_table_refs;
/*
The count of how many times cleanup() was called with cleaned==false
for the unit specifying the recursive CTE for which this object was created
@@ -5800,7 +5812,8 @@ class select_union_recursive :public select_unit
select_union_recursive(THD *thd_arg):
select_unit(thd_arg),
- incr_table(0), first_rec_table_to_update(0), cleanup_count(0) {};
+ incr_table(0), first_rec_table_to_update(0), cleanup_count(0)
+ { incr_table_param.init(); };
int send_data(List<Item> &items);
bool create_result_table(THD *thd, List<Item> *column_types,
@@ -6035,11 +6048,13 @@ public:
- The sj-materialization temporary table
- Members needed to make index lookup or a full scan of the temptable.
*/
+class POSITION;
+
class SJ_MATERIALIZATION_INFO : public Sql_alloc
{
public:
/* Optimal join sub-order */
- struct st_position *positions;
+ POSITION *positions;
uint tables; /* Number of tables in the sj-nest */
@@ -6095,8 +6110,6 @@ struct SORT_FIELD_ATTR
{
uint length; /* Length of sort field */
uint suffix_length; /* Length suffix (0-4) */
- enum Type { FIXED_SIZE, VARIABLE_SIZE } type;
- bool is_variable_sized() { return type == VARIABLE_SIZE; }
};
@@ -6254,7 +6267,8 @@ public:
class multi_update :public select_result_interceptor
{
TABLE_LIST *all_tables; /* query/update command tables */
- List<TABLE_LIST> *leaves; /* list of leves of join table tree */
+ List<TABLE_LIST> *leaves; /* list of leaves of join table tree */
+ List<TABLE_LIST> updated_leaves; /* list of updated leaves */
TABLE_LIST *update_tables;
TABLE **tmp_tables, *main_table, *table_to_update;
TMP_TABLE_PARAM *tmp_table_param;
@@ -6292,6 +6306,7 @@ public:
List<Item> *fields, List<Item> *values,
enum_duplicates handle_duplicates, bool ignore);
~multi_update();
+ bool init(THD *thd);
int prepare(List<Item> &list, SELECT_LEX_UNIT *u);
int send_data(List<Item> &items);
bool initialize_tables (JOIN *join);
@@ -6735,6 +6750,22 @@ class Sql_mode_save
sql_mode_t old_mode; // SQL mode saved at construction time.
};
+class Abort_on_warning_instant_set
+{
+ THD *m_thd;
+ bool m_save_abort_on_warning;
+public:
+ Abort_on_warning_instant_set(THD *thd, bool temporary_value)
+ :m_thd(thd), m_save_abort_on_warning(thd->abort_on_warning)
+ {
+ thd->abort_on_warning= temporary_value;
+ }
+ ~Abort_on_warning_instant_set()
+ {
+ m_thd->abort_on_warning= m_save_abort_on_warning;
+ }
+};
+
class Switch_to_definer_security_ctx
{
public:
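Note (illustrative names only): Abort_on_warning_instant_set added above is a classic save-and-restore scope guard. A standalone sketch of the same pattern with plain types:

    #include <cassert>

    // Saves a flag on construction, forces a temporary value, and restores
    // the saved value when the guard leaves scope -- even on early return.
    struct Flag_guard
    {
      bool &flag;
      bool saved;
      Flag_guard(bool &f, bool temporary_value) : flag(f), saved(f)
      { f= temporary_value; }
      ~Flag_guard() { flag= saved; }
    };

    int main()
    {
      bool abort_on_warning= false;
      {
        Flag_guard guard(abort_on_warning, true);  // strict behaviour in this scope
        assert(abort_on_warning);
      }                                            // destructor restores the old value
      assert(!abort_on_warning);
      return 0;
    }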
diff --git a/sql/sql_connect.cc b/sql/sql_connect.cc
index 643b7ee898a..4d09dab392b 100644
--- a/sql/sql_connect.cc
+++ b/sql/sql_connect.cc
@@ -92,7 +92,6 @@ int get_or_create_user_conn(THD *thd, const char *user,
uc->host= uc->user + user_len + 1;
uc->len= (uint)temp_len;
uc->connections= uc->questions= uc->updates= uc->conn_per_hour= 0;
- uc->user_resources= *mqh;
uc->reset_utime= thd->thr_create_utime;
if (my_hash_insert(&hash_user_connections, (uchar*) uc))
{
@@ -102,6 +101,7 @@ int get_or_create_user_conn(THD *thd, const char *user,
goto end;
}
}
+ uc->user_resources= *mqh;
thd->user_connect=uc;
uc->connections++;
end:
diff --git a/sql/sql_cte.cc b/sql/sql_cte.cc
index d4b6d815118..f3861ebd3e9 100644
--- a/sql/sql_cte.cc
+++ b/sql/sql_cte.cc
@@ -252,6 +252,8 @@ With_element *With_clause::find_table_def(TABLE_LIST *table,
!table->is_fqtn)
{
table->set_derived();
+ table->db.str= empty_c_string;
+ table->db.length= 0;
return with_elem;
}
}
@@ -887,8 +889,6 @@ st_select_lex_unit *With_element::clone_parsed_spec(THD *thd,
goto err;
spec_tables_tail= tbl;
}
- if (check_table_access(thd, SELECT_ACL, spec_tables, FALSE, UINT_MAX, FALSE))
- goto err;
if (spec_tables)
{
if (with_table->next_global)
@@ -904,6 +904,7 @@ st_select_lex_unit *With_element::clone_parsed_spec(THD *thd,
with_table->next_global= spec_tables;
}
res= &lex->unit;
+ res->with_element= this;
lex->unit.include_down(with_table->select_lex);
lex->unit.set_slave(with_select);
@@ -914,6 +915,22 @@ st_select_lex_unit *With_element::clone_parsed_spec(THD *thd,
with_select));
if (check_dependencies_in_with_clauses(lex->with_clauses_list))
res= NULL;
+ /*
+ Resolve references to CTE from the spec_tables list that have not
+ been resolved yet.
+ */
+ for (TABLE_LIST *tbl= spec_tables;
+ tbl;
+ tbl= tbl->next_global)
+ {
+ if (!tbl->with)
+ tbl->with= with_select->find_table_def_in_with_clauses(tbl);
+ if (tbl == spec_tables_tail)
+ break;
+ }
+ if (check_table_access(thd, SELECT_ACL, spec_tables, FALSE, UINT_MAX, FALSE))
+ goto err;
+
lex->sphead= NULL; // in order not to delete lex->sphead
lex_end(lex);
err:
@@ -1466,10 +1483,11 @@ void With_element::print(String *str, enum_query_type query_type)
bool With_element::instantiate_tmp_tables()
{
- List_iterator_fast<TABLE> li(rec_result->rec_tables);
- TABLE *rec_table;
- while ((rec_table= li++))
+ List_iterator_fast<TABLE_LIST> li(rec_result->rec_table_refs);
+ TABLE_LIST *rec_tbl;
+ while ((rec_tbl= li++))
{
+ TABLE *rec_table= rec_tbl->table;
if (!rec_table->is_created() &&
instantiate_tmp_table(rec_table,
rec_table->s->key_info,
diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc
index 5d006a7518c..9fa1e015274 100644
--- a/sql/sql_delete.cc
+++ b/sql/sql_delete.cc
@@ -1,6 +1,6 @@
/*
Copyright (c) 2000, 2019, Oracle and/or its affiliates.
- Copyright (c) 2010, 2019, MariaDB
+ Copyright (c) 2010, 2021, MariaDB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -293,7 +293,15 @@ int TABLE::delete_row()
store_record(this, record[1]);
vers_update_end();
- return file->ha_update_row(record[1], record[0]);
+ int err= file->ha_update_row(record[1], record[0]);
+ /*
+ MDEV-23644: we get HA_ERR_FOREIGN_DUPLICATE_KEY iff we already got history
+ row with same trx_id which is the result of foreign key action, so we
+ don't need one more history row.
+ */
+ if (err == HA_ERR_FOREIGN_DUPLICATE_KEY)
+ return file->ha_delete_row(record[0]);
+ return err;
}
@@ -1120,14 +1128,11 @@ int mysql_multi_delete_prepare(THD *thd)
FALSE, DELETE_ACL, SELECT_ACL, FALSE))
DBUG_RETURN(TRUE);
- if (lex->first_select_lex()->handle_derived(thd->lex, DT_MERGE))
- DBUG_RETURN(TRUE);
-
/*
Multi-delete can't be constructed over-union => we always have
single SELECT on top and have to check underlying SELECTs of it
*/
- lex->first_select_lex()->exclude_from_table_unique_test= TRUE;
+ lex->first_select_lex()->set_unique_exclude();
/* Fix tables-to-be-deleted-from list to point at opened tables */
for (target_tbl= (TABLE_LIST*) aux_tables;
target_tbl;
@@ -1150,6 +1155,12 @@ int mysql_multi_delete_prepare(THD *thd)
target_tbl->table_name.str, "DELETE");
DBUG_RETURN(TRUE);
}
+ }
+
+ for (target_tbl= (TABLE_LIST*) aux_tables;
+ target_tbl;
+ target_tbl= target_tbl->next_local)
+ {
/*
Check that table from which we delete is not used somewhere
inside subqueries/view.
@@ -1194,12 +1205,6 @@ multi_delete::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
unit= u;
do_delete= 1;
THD_STAGE_INFO(thd, stage_deleting_from_main_table);
- SELECT_LEX *select_lex= u->first_select();
- if (select_lex->first_cond_optimization)
- {
- if (select_lex->handle_derived(thd->lex, DT_MERGE))
- DBUG_RETURN(TRUE);
- }
DBUG_RETURN(0);
}
diff --git a/sql/sql_derived.cc b/sql/sql_derived.cc
index 1e416c307cf..132872c4a9e 100644
--- a/sql/sql_derived.cc
+++ b/sql/sql_derived.cc
@@ -1,6 +1,6 @@
/*
Copyright (c) 2002, 2011, Oracle and/or its affiliates.
- Copyright (c) 2010, 2020, MariaDB
+ Copyright (c) 2010, 2021, MariaDB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -382,10 +382,6 @@ bool mysql_derived_merge(THD *thd, LEX *lex, TABLE_LIST *derived)
DBUG_RETURN(FALSE);
}
- if (thd->lex->sql_command == SQLCOM_UPDATE_MULTI ||
- thd->lex->sql_command == SQLCOM_DELETE_MULTI)
- thd->save_prep_leaf_list= TRUE;
-
arena= thd->activate_stmt_arena_if_needed(&backup); // For easier test
if (!derived->merged_for_insert ||
@@ -459,6 +455,7 @@ bool mysql_derived_merge(THD *thd, LEX *lex, TABLE_LIST *derived)
derived->on_expr= expr;
derived->prep_on_expr= expr->copy_andor_structure(thd);
}
+ thd->where= "on clause";
if (derived->on_expr &&
derived->on_expr->fix_fields_if_needed_for_bool(thd, &derived->on_expr))
{
@@ -596,6 +593,32 @@ bool mysql_derived_init(THD *thd, LEX *lex, TABLE_LIST *derived)
}
+/**
+ @brief
+ Prevent name resolution out of context of ON expressions in derived tables
+
+ @param
+ join_list list of tables used in from list of a derived
+
+ @details
+ The function sets the Name_resolution_context::outer_context to NULL
+ for all ON expression contexts in the given join list. It does this
+ recursively for all nested joins the list contains.
+*/
+
+static void nullify_outer_context_for_on_clauses(List<TABLE_LIST>& join_list)
+{
+ List_iterator<TABLE_LIST> li(join_list);
+ while (TABLE_LIST *table= li++)
+ {
+ if (table->on_context)
+ table->on_context->outer_context= NULL;
+ if (table->nested_join)
+ nullify_outer_context_for_on_clauses(table->nested_join->join_list);
+ }
+}
+
+
/*
Create temporary table structure (but do not fill it)
@@ -710,7 +733,7 @@ bool mysql_derived_prepare(THD *thd, LEX *lex, TABLE_LIST *derived)
if (derived->is_with_table_recursive_reference())
{
/* Here 'derived" is a secondary recursive table reference */
- unit->with_element->rec_result->rec_tables.push_back(derived->table);
+ unit->with_element->rec_result->rec_table_refs.push_back(derived);
}
}
DBUG_ASSERT(derived->table || res);
@@ -760,7 +783,12 @@ bool mysql_derived_prepare(THD *thd, LEX *lex, TABLE_LIST *derived)
/* prevent name resolving out of derived table */
for (SELECT_LEX *sl= first_select; sl; sl= sl->next_select())
{
+ // Prevent it for the WHERE clause
sl->context.outer_context= 0;
+
+ // And for ON clauses, if there are any
+ nullify_outer_context_for_on_clauses(*sl->join_list);
+
if (!derived->is_with_table_recursive_reference() ||
(!derived->with->with_anchor &&
!derived->with->is_with_prepared_anchor()))
@@ -808,17 +836,17 @@ bool mysql_derived_prepare(THD *thd, LEX *lex, TABLE_LIST *derived)
derived->fill_me= FALSE;
- if (!(derived->derived_result= new (thd->mem_root) select_unit(thd)))
+ if ((!derived->is_with_table_recursive_reference() ||
+ !derived->derived_result) &&
+ !(derived->derived_result= new (thd->mem_root) select_unit(thd)))
DBUG_RETURN(TRUE); // out of memory
- lex->context_analysis_only|= CONTEXT_ANALYSIS_ONLY_DERIVED;
// st_select_lex_unit::prepare correctly work for single select
if ((res= unit->prepare(derived, derived->derived_result, 0)))
goto exit;
if (derived->with &&
(res= derived->with->rename_columns_of_derived_unit(thd, unit)))
goto exit;
- lex->context_analysis_only&= ~CONTEXT_ANALYSIS_ONLY_DERIVED;
if ((res= check_duplicate_names(thd, unit->types, 0)))
goto exit;
@@ -827,7 +855,8 @@ bool mysql_derived_prepare(THD *thd, LEX *lex, TABLE_LIST *derived)
Depending on the result field translation will or will not
be created.
*/
- if (derived->init_derived(thd, FALSE))
+ if (!derived->is_with_table_recursive_reference() &&
+ derived->init_derived(thd, FALSE))
goto exit;
/*
@@ -1452,6 +1481,8 @@ bool pushdown_cond_for_derived(THD *thd, Item *cond, TABLE_LIST *derived)
for (; sl; sl= sl->next_select())
{
Item *extracted_cond_copy;
+ if (!sl->cond_pushdown_is_allowed())
+ continue;
/*
For each select of the unit except the last one
create a clone of extracted_cond
diff --git a/sql/sql_handler.cc b/sql/sql_handler.cc
index 12119997430..8447d5bea7d 100644
--- a/sql/sql_handler.cc
+++ b/sql/sql_handler.cc
@@ -687,7 +687,6 @@ mysql_ha_fix_cond_and_key(SQL_HANDLER *handler,
for (keypart_map= key_len=0 ; (item=it_ke++) ; key_part++)
{
- my_bitmap_map *old_map;
/* note that 'item' can be changed by fix_fields() call */
if (item->fix_fields_if_needed_for_scalar(thd, it_ke.ref()))
return 1;
@@ -699,9 +698,9 @@ mysql_ha_fix_cond_and_key(SQL_HANDLER *handler,
}
if (!in_prepare)
{
- old_map= dbug_tmp_use_all_columns(table, table->write_set);
+ MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->write_set);
(void) item->save_in_field(key_part->field, 1);
- dbug_tmp_restore_column_map(table->write_set, old_map);
+ dbug_tmp_restore_column_map(&table->write_set, old_map);
}
key_len+= key_part->store_length;
keypart_map= (keypart_map << 1) | 1;
diff --git a/sql/sql_help.cc b/sql/sql_help.cc
index e5f1e958d99..81e5ad48e7e 100644
--- a/sql/sql_help.cc
+++ b/sql/sql_help.cc
@@ -742,6 +742,9 @@ static bool mysqld_help_internal(THD *thd, const char *mask)
&name, &description, &example);
delete select;
+ if (thd->is_error())
+ goto error;
+
if (count_topics == 0)
{
int UNINIT_VAR(key_id);
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index 01e5752def7..c969725bea4 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -1981,6 +1981,8 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info)
if (likely(!error))
{
info->deleted++;
+ if (!table->file->has_transactions())
+ thd->transaction.stmt.modified_non_trans_table= TRUE;
if (table->versioned(VERS_TIMESTAMP))
{
store_record(table, record[2]);
@@ -2701,7 +2703,7 @@ int write_delayed(THD *thd, TABLE *table, enum_duplicates duplic,
delayed_row *row= 0;
Delayed_insert *di=thd->di;
const Discrete_interval *forced_auto_inc;
- size_t user_len, host_len, ip_len;
+ size_t user_len, host_len, ip_length;
DBUG_ENTER("write_delayed");
DBUG_PRINT("enter", ("query = '%s' length %lu", query.str,
(ulong) query.length));
@@ -2735,7 +2737,7 @@ int write_delayed(THD *thd, TABLE *table, enum_duplicates duplic,
goto err;
}
- user_len= host_len= ip_len= 0;
+ user_len= host_len= ip_length= 0;
row->user= row->host= row->ip= NULL;
if (thd->security_ctx)
{
@@ -2744,11 +2746,11 @@ int write_delayed(THD *thd, TABLE *table, enum_duplicates duplic,
if (thd->security_ctx->host)
host_len= strlen(thd->security_ctx->host) + 1;
if (thd->security_ctx->ip)
- ip_len= strlen(thd->security_ctx->ip) + 1;
+ ip_length= strlen(thd->security_ctx->ip) + 1;
}
/* This can't be THREAD_SPECIFIC as it's freed in delayed thread */
if (!(row->record= (char*) my_malloc(table->s->reclength +
- user_len + host_len + ip_len,
+ user_len + host_len + ip_length,
MYF(MY_WME))))
goto err;
memcpy(row->record, table->record[0], table->s->reclength);
@@ -2768,7 +2770,7 @@ int write_delayed(THD *thd, TABLE *table, enum_duplicates duplic,
if (thd->security_ctx->ip)
{
row->ip= row->record + table->s->reclength + user_len + host_len;
- memcpy(row->ip, thd->security_ctx->ip, ip_len);
+ memcpy(row->ip, thd->security_ctx->ip, ip_length);
}
}
row->query_id= thd->query_id;
@@ -2867,23 +2869,7 @@ void kill_delayed_threads(void)
mysql_mutex_lock(&di->thd.LOCK_thd_kill);
if (di->thd.killed < KILL_CONNECTION)
di->thd.set_killed_no_mutex(KILL_CONNECTION);
- if (di->thd.mysys_var)
- {
- mysql_mutex_lock(&di->thd.mysys_var->mutex);
- if (di->thd.mysys_var->current_cond)
- {
- /*
- We need the following test because the main mutex may be locked
- in handle_delayed_insert()
- */
- if (&di->mutex != di->thd.mysys_var->current_mutex)
- mysql_mutex_lock(di->thd.mysys_var->current_mutex);
- mysql_cond_broadcast(di->thd.mysys_var->current_cond);
- if (&di->mutex != di->thd.mysys_var->current_mutex)
- mysql_mutex_unlock(di->thd.mysys_var->current_mutex);
- }
- mysql_mutex_unlock(&di->thd.mysys_var->mutex);
- }
+ di->thd.abort_current_cond_wait(false);
mysql_mutex_unlock(&di->thd.LOCK_thd_kill);
}
mysql_mutex_unlock(&LOCK_delayed_insert); // For unlink from list
@@ -4738,7 +4724,8 @@ bool select_create::send_eof()
if (!table->s->tmp_table)
{
#ifdef WITH_WSREP
- if (WSREP(thd))
+ if (WSREP(thd) &&
+ table->file->ht->db_type == DB_TYPE_INNODB)
{
if (thd->wsrep_trx_id() == WSREP_UNDEFINED_TRX_ID)
{
diff --git a/sql/sql_join_cache.cc b/sql/sql_join_cache.cc
index 20ed976daab..f072a675e31 100644
--- a/sql/sql_join_cache.cc
+++ b/sql/sql_join_cache.cc
@@ -1200,7 +1200,7 @@ bool JOIN_CACHE::check_emb_key_usage()
Item *item= ref->items[i]->real_item();
Field *fld= ((Item_field *) item)->field;
CACHE_FIELD *init_copy= field_descr+flag_fields+i;
- for (j= i, copy= init_copy; i < local_key_arg_fields; i++, copy++)
+ for (j= i, copy= init_copy; j < local_key_arg_fields; j++, copy++)
{
if (fld->eq(copy->field))
{
@@ -1650,7 +1650,7 @@ void JOIN_CACHE::get_record_by_pos(uchar *rec_ptr)
}
-/*
+/*
Get the match flag from the referenced record: the default implementation
SYNOPSIS
@@ -1662,6 +1662,7 @@ void JOIN_CACHE::get_record_by_pos(uchar *rec_ptr)
get the match flag for the record pointed by the reference at the position
rec_ptr. If the match flag is placed in one of the previous buffers the
function first reaches the linked record fields in this buffer.
+ The function returns the value of the first encountered match flag.
RETURN VALUE
match flag for the record at the position rec_ptr
@@ -1686,6 +1687,39 @@ enum JOIN_CACHE::Match_flag JOIN_CACHE::get_match_flag_by_pos(uchar *rec_ptr)
/*
+ Get the match flag for the referenced record from specified join buffer
+
+ SYNOPSIS
+ get_match_flag_by_pos_from_join_buffer()
+ rec_ptr position of the first field of the record in the join buffer
+ tab join table with join buffer where to look for the match flag
+
+ DESCRIPTION
+ This default implementation of the get_match_flag_by_pos_from_join_buffer
+ method gets the match flag for the record pointed by the reference at the
+ position rec_ptr from the join buffer attached to the join table tab.
+
+ RETURN VALUE
+ match flag for the record at the position rec_ptr from the join
+ buffer attached to the table tab.
+*/
+
+enum JOIN_CACHE::Match_flag
+JOIN_CACHE::get_match_flag_by_pos_from_join_buffer(uchar *rec_ptr,
+ JOIN_TAB *tab)
+{
+ DBUG_ASSERT(tab->cache && tab->cache->with_match_flag);
+ for (JOIN_CACHE *cache= this; ; )
+ {
+ if (cache->join_tab == tab)
+ return (enum Match_flag) rec_ptr[0];
+ cache= cache->prev_cache;
+ rec_ptr= cache->get_rec_ref(rec_ptr);
+ }
+}
+
+
+/*
Calculate the increment of the auxiliary buffer for a record write
SYNOPSIS
@@ -1955,6 +1989,10 @@ bool JOIN_CACHE::read_referenced_field(CACHE_FIELD *copy,
If the record is skipped the value of 'pos' is set to point to the position
right after the record.
+ NOTE
+ Currently this function is called only when generating null complemented
+ records for outer joins (=> only when join_tab->first_unmatched != NULL).
+
RETURN VALUE
TRUE the match flag is set to MATCH_FOUND and the record has been skipped
FALSE otherwise
@@ -1967,7 +2005,9 @@ bool JOIN_CACHE::skip_if_matched()
if (prev_cache)
offset+= prev_cache->get_size_of_rec_offset();
/* Check whether the match flag is MATCH_FOUND */
- if (get_match_flag_by_pos(pos+offset) == MATCH_FOUND)
+ if (get_match_flag_by_pos_from_join_buffer(pos+offset,
+ join_tab->first_unmatched) ==
+ MATCH_FOUND)
{
pos+= size_of_rec_len + get_rec_length(pos);
return TRUE;
@@ -1984,13 +2024,23 @@ bool JOIN_CACHE::skip_if_matched()
DESCRIPTION
This default implementation of the virtual function skip_if_not_needed_match
- skips the next record from the join buffer if its match flag is not
- MATCH_NOT_FOUND, and, either its value is MATCH_FOUND and join_tab is the
- first inner table of an inner join, or, its value is MATCH_IMPOSSIBLE
- and join_tab is the first inner table of an outer join.
+ skips the next record from the join when generating join extensions
+ for the records in the join buffer depending on the value of the match flag.
+ - In the case of a semi-join nest the match flag may be in two states
+ {MATCH_NOT_FOUND, MATCH_FOUND}. The record is skipped if the flag is set
+ to MATCH_FOUND.
+ - In the case of an outer join nest when the not_exists optimization is applied
+ the match flag may be in three states {MATCH_NOT_FOUND, MATCH_IMPOSSIBLE,
+ MATCH_FOUND}. The record is skipped if the flag is set to MATCH_FOUND or
+ to MATCH_IMPOSSIBLE.
+
If the record is skipped the value of 'pos' is set to point to the position
right after the record.
+ NOTE
+ Currently the function is called only when generating non-null complemented
+ extensions for records in the join buffer.
+
RETURN VALUE
TRUE the record has to be skipped
FALSE otherwise
@@ -2001,11 +2051,19 @@ bool JOIN_CACHE::skip_if_not_needed_match()
DBUG_ASSERT(with_length);
enum Match_flag match_fl;
uint offset= size_of_rec_len;
+ bool skip= FALSE;
if (prev_cache)
offset+= prev_cache->get_size_of_rec_offset();
- if ((match_fl= get_match_flag_by_pos(pos+offset)) != MATCH_NOT_FOUND &&
- (join_tab->check_only_first_match() == (match_fl == MATCH_FOUND)) )
+ if (!join_tab->check_only_first_match())
+ return FALSE;
+
+ match_fl= get_match_flag_by_pos(pos+offset);
+ skip= join_tab->first_sj_inner_tab ?
+ match_fl == MATCH_FOUND : // the case of semi-join
+ match_fl != MATCH_NOT_FOUND; // the case of outer-join
+
+ if (skip)
{
pos+= size_of_rec_len + get_rec_length(pos);
return TRUE;
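Note (illustrative only; the real code reads the flag byte out of the join buffer): the new skip rule above can be read as a small pure function of the nest kind and the match flag. A standalone sketch:

    #include <cstdio>

    enum Match_flag { MATCH_NOT_FOUND, MATCH_FOUND, MATCH_IMPOSSIBLE };

    // Semi-join nests: skip once a real match was found.
    // Outer-join nests (not_exists optimization): also skip MATCH_IMPOSSIBLE.
    static bool skip_record(bool semi_join, Match_flag flag)
    {
      return semi_join ? flag == MATCH_FOUND
                       : flag != MATCH_NOT_FOUND;
    }

    int main()
    {
      const char *names[]= {"NOT_FOUND", "FOUND", "IMPOSSIBLE"};
      for (int sj= 0; sj <= 1; sj++)
        for (int f= MATCH_NOT_FOUND; f <= MATCH_IMPOSSIBLE; f++)
          std::printf("%s nest, %-10s -> %s\n", sj ? "semi-join" : "outer-join",
                      names[f], skip_record(sj, (Match_flag) f) ? "skip" : "keep");
      return 0;
    }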
@@ -2105,7 +2163,14 @@ enum_nested_loop_state JOIN_CACHE::join_records(bool skip_last)
goto finish;
}
join_tab->not_null_compl= FALSE;
- /* Prepare for generation of null complementing extensions */
+ /*
+ Prepare for generation of null complementing extensions.
+ For all inner tables of the outer join operation for which
+ regular matches have just been found, the field 'first_unmatched'
+ is set to point to the first inner table. After all null
+ complement rows are generated for this outer join this field
+ is set back to NULL.
+ */
for (tab= join_tab->first_inner; tab <= join_tab->last_inner; tab++)
tab->first_unmatched= join_tab->first_inner;
}
@@ -2222,7 +2287,10 @@ enum_nested_loop_state JOIN_CACHE::join_matching_records(bool skip_last)
int error;
enum_nested_loop_state rc= NESTED_LOOP_OK;
join_tab->table->null_row= 0;
- bool check_only_first_match= join_tab->check_only_first_match();
+ bool check_only_first_match=
+ join_tab->check_only_first_match() &&
+ (!join_tab->first_inner || // semi-join case
+ join_tab->first_inner == join_tab->first_unmatched); // outer join case
bool outer_join_first_inner= join_tab->is_first_inner_for_outer_join();
DBUG_ENTER("JOIN_CACHE::join_matching_records");
diff --git a/sql/sql_join_cache.h b/sql/sql_join_cache.h
index 7b8b942180f..d0bf4761f65 100644
--- a/sql/sql_join_cache.h
+++ b/sql/sql_join_cache.h
@@ -206,7 +206,9 @@ protected:
/*
This flag indicates that records written into the join buffer contain
- a match flag field. The flag must be set by the init method.
+ a match flag field. The flag must be set by the init method.
+ Currently any implementation of the virtual init method calls
+ the function JOIN_CACHE::calc_record_fields() to set this flag.
*/
bool with_match_flag;
/*
@@ -646,6 +648,13 @@ public:
/* Shall return the value of the match flag for the positioned record */
virtual enum Match_flag get_match_flag_by_pos(uchar *rec_ptr);
+ /*
+ Shall return the value of the match flag for the positioned record
+ from the join buffer attached to the specified table
+ */
+ virtual enum Match_flag
+ get_match_flag_by_pos_from_join_buffer(uchar *rec_ptr, JOIN_TAB *tab);
+
/* Shall return the position of the current record */
virtual uchar *get_curr_rec() { return curr_rec_pos; }
diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc
index 10cd20e1207..5937c43c95d 100644
--- a/sql/sql_lex.cc
+++ b/sql/sql_lex.cc
@@ -2289,6 +2289,8 @@ int Lex_input_stream::scan_ident_delimited(THD *thd,
Return the quote character, to have the parser fail on syntax error.
*/
m_ptr= (char *) m_tok_start + 1;
+ if (m_echo)
+ m_cpp_ptr= (char *) m_cpp_tok_start + 1;
return quote_char;
}
int var_length= my_charlen(cs, get_ptr() - 1, get_end_of_query());
@@ -2431,6 +2433,7 @@ void st_select_lex::init_query()
is_service_select= 0;
parsing_place= NO_MATTER;
save_parsing_place= NO_MATTER;
+ context_analysis_place= NO_MATTER;
exclude_from_table_unique_test= no_wrap_view_item= FALSE;
nest_level= 0;
link_next= 0;
@@ -2485,6 +2488,8 @@ void st_select_lex::init_select()
with_dep= 0;
join= 0;
lock_type= TL_READ_DEFAULT;
+ save_many_values.empty();
+ save_insert_list= 0;
tvc= 0;
in_funcs.empty();
curr_tvc_name= 0;
@@ -2528,6 +2533,8 @@ void st_select_lex_node::add_slave(st_select_lex_node *slave_arg)
{
slave= slave_arg;
slave_arg->master= this;
+ slave->prev= &master->slave;
+ slave->next= 0;
}
}
@@ -2550,6 +2557,27 @@ void st_select_lex_node::link_chain_down(st_select_lex_node *first)
}
/*
+ @brief
+ Substitute this node in select tree for a newly creates node
+
+ @param subst the node to substitute for
+
+ @details
+ The function substitutes this node in the select tree for a newly
+ created node subst. This node is just removed from the tree but all
+ its link fields and the attached sub-tree remain untouched.
+*/
+
+void st_select_lex_node::substitute_in_tree(st_select_lex_node *subst)
+{
+ if ((subst->next= next))
+ next->prev= &subst->next;
+ subst->prev= prev;
+ (*prev)= subst;
+ subst->master= master;
+}
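Note (names illustrative; the master/slave links are omitted): substitute_in_tree() above relinks an intrusive list whose prev member is the address of the predecessor's next pointer. A standalone sketch of that relinking:

    struct Node
    {
      Node  *next;
      Node **prev;   // address of the pointer that currently points at this node
    };

    // Put subst where old_node used to be; old_node keeps its own links untouched.
    static void substitute(Node *old_node, Node *subst)
    {
      if ((subst->next= old_node->next))
        subst->next->prev= &subst->next;
      subst->prev= old_node->prev;
      *old_node->prev= subst;
    }

    int main()
    {
      Node a{}, b{}, c{};
      Node *head= &a;
      a.prev= &head;   a.next= &b;
      b.prev= &a.next; b.next= nullptr;
      substitute(&b, &c);                       // c takes b's place after a
      return (a.next == &c && c.prev == &a.next && c.next == nullptr) ? 0 : 1;
    }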
+
+/*
include on level down (but do not link)
SYNOPSYS
@@ -2779,7 +2807,7 @@ void st_select_lex_unit::exclude_tree()
*/
bool st_select_lex::mark_as_dependent(THD *thd, st_select_lex *last,
- Item *dependency)
+ Item_ident *dependency)
{
DBUG_ASSERT(this != last);
@@ -2787,10 +2815,14 @@ bool st_select_lex::mark_as_dependent(THD *thd, st_select_lex *last,
/*
Mark all selects from resolved to 1 before select where was
found table as depended (of select where was found table)
+
+ We move by name resolution context, because during a merge some selects can
+ be excluded from the SELECT tree
*/
- SELECT_LEX *s= this;
+ Name_resolution_context *c= &this->context;
do
{
+ SELECT_LEX *s= c->select_lex;
if (!(s->uncacheable & UNCACHEABLE_DEPENDENT_GENERATED))
{
// Select is dependent of outer select
@@ -2812,7 +2844,7 @@ bool st_select_lex::mark_as_dependent(THD *thd, st_select_lex *last,
if (subquery_expr && subquery_expr->mark_as_dependent(thd, last,
dependency))
return TRUE;
- } while ((s= s->outer_select()) != last && s != 0);
+ } while ((c= c->outer_context) != NULL && (c->select_lex != last));
is_correlated= TRUE;
this->master_unit()->item->is_correlated= TRUE;
return FALSE;
@@ -4831,14 +4863,14 @@ void st_select_lex::set_explain_type(bool on_the_fly)
/*
pos_in_table_list=NULL for e.g. post-join aggregation JOIN_TABs.
*/
- if (!tab->table);
- else if (const TABLE_LIST *pos= tab->table->pos_in_table_list)
+ if (!(tab->table && tab->table->pos_in_table_list))
+ continue;
+ TABLE_LIST *tbl= tab->table->pos_in_table_list;
+ if (tbl->with && tbl->with->is_recursive &&
+ tbl->is_with_table_recursive_reference())
{
- if (pos->with && pos->with->is_recursive)
- {
- uses_cte= true;
- break;
- }
+ uses_cte= true;
+ break;
}
}
if (uses_cte)
@@ -4981,6 +5013,9 @@ bool LEX::save_prep_leaf_tables()
bool st_select_lex::save_prep_leaf_tables(THD *thd)
{
+ if (prep_leaf_list_state == SAVED)
+ return FALSE;
+
List_iterator_fast<TABLE_LIST> li(leaf_tables);
TABLE_LIST *table;
@@ -5012,6 +5047,27 @@ bool st_select_lex::save_prep_leaf_tables(THD *thd)
}
+/**
+  Set exclude_from_table_unique_test for this select and all selects
+  belonging to the underlying units of its derived tables or views
+*/
+
+void st_select_lex::set_unique_exclude()
+{
+ exclude_from_table_unique_test= TRUE;
+ for (SELECT_LEX_UNIT *unit= first_inner_unit();
+ unit;
+ unit= unit->next_unit())
+ {
+ if (unit->derived && unit->derived->is_view_or_derived())
+ {
+ for (SELECT_LEX *sl= unit->first_select(); sl; sl= sl->next_select())
+ sl->set_unique_exclude();
+ }
+ }
+}
+
+
/*
Return true if this select_lex has been converted into a semi-join nest
within 'ancestor'.
@@ -8869,7 +8925,6 @@ bool LEX::last_field_generated_always_as_row_end()
VERS_SYS_END_FLAG);
}
-
void st_select_lex_unit::reset_distinct()
{
union_distinct= NULL;
@@ -8885,6 +8940,20 @@ void st_select_lex_unit::reset_distinct()
}
+void LEX::save_values_list_state()
+{
+ current_select->save_many_values= many_values;
+ current_select->save_insert_list= insert_list;
+}
+
+
+void LEX::restore_values_list_state()
+{
+ many_values= current_select->save_many_values;
+ insert_list= current_select->save_insert_list;
+}
+
+
void st_select_lex_unit::fix_distinct()
{
if (union_distinct && this != union_distinct->master_unit())
@@ -9344,7 +9413,7 @@ SELECT_LEX *LEX::parsed_subselect(SELECT_LEX_UNIT *unit)
(curr_sel == NULL && current_select == &builtin_select));
if (curr_sel)
{
- curr_sel->register_unit(unit, &curr_sel->context);
+ curr_sel->register_unit(unit, context_stack.head());
curr_sel->add_statistics(unit);
}
@@ -9381,6 +9450,7 @@ bool LEX::parsed_insert_select(SELECT_LEX *first_select)
bool LEX::parsed_TVC_start()
{
SELECT_LEX *sel;
+ save_values_list_state();
many_values.empty();
insert_list= 0;
if (!(sel= alloc_select(TRUE)) ||
@@ -9394,14 +9464,13 @@ bool LEX::parsed_TVC_start()
SELECT_LEX *LEX::parsed_TVC_end()
{
-
SELECT_LEX *res= pop_select(); // above TVC select
if (!(res->tvc=
new (thd->mem_root) table_value_constr(many_values,
res,
res->options)))
return NULL;
- many_values.empty();
+ restore_values_list_state();
return res;
}
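
The new st_select_lex_node::substitute_in_tree() above, like the extra prev/next bookkeeping in add_slave(), uses the intrusive-list idiom in which prev holds the address of the pointer that points at the node, so a node can be replaced without knowing whether that pointer is the list head or a neighbour's next field. A minimal standalone sketch of the idiom (illustrative types only, not MariaDB code):

    #include <cassert>
    #include <cstdio>

    struct Node
    {
      Node *next= nullptr;
      Node **prev= nullptr;   // address of the pointer that points at this node

      // Replace 'this' by 'subst' in the chain; 'this' keeps its own links.
      void substitute(Node *subst)
      {
        if ((subst->next= next))
          next->prev= &subst->next;
        subst->prev= prev;
        *prev= subst;          // works for the head pointer and for ->next alike
      }
    };

    int main()
    {
      Node nodes[3];
      Node *head= nullptr;

      // Link three nodes: head -> nodes[0] -> nodes[1] -> nodes[2]
      Node **tail= &head;
      for (Node &n : nodes)
      {
        n.prev= tail;
        *tail= &n;
        tail= &n.next;
      }

      Node replacement;
      nodes[1].substitute(&replacement);   // swap out the middle element

      assert(head == &nodes[0]);
      assert(head->next == &replacement);
      assert(replacement.next == &nodes[2]);
      std::puts("substitution ok");
      return 0;
    }
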
diff --git a/sql/sql_lex.h b/sql/sql_lex.h
index 66c44f2d901..466b23b3f94 100644
--- a/sql/sql_lex.h
+++ b/sql/sql_lex.h
@@ -1,5 +1,5 @@
/* Copyright (c) 2000, 2019, Oracle and/or its affiliates.
- Copyright (c) 2010, 2019, MariaDB Corporation.
+ Copyright (c) 2010, 2021, MariaDB Corporation
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -501,7 +501,7 @@ struct LEX_MASTER_INFO
}
host= user= password= log_file_name= ssl_key= ssl_cert= ssl_ca=
- ssl_capath= ssl_cipher= relay_log_name= 0;
+ ssl_capath= ssl_cipher= ssl_crl= ssl_crlpath= relay_log_name= NULL;
pos= relay_log_pos= server_id= port= connect_retry= 0;
heartbeat_period= 0;
ssl= ssl_verify_server_cert= heartbeat_opt=
@@ -764,7 +764,7 @@ public:
link_next= NULL;
link_prev= NULL;
}
-
+ void substitute_in_tree(st_select_lex_node *subst);
void set_slave(st_select_lex_node *slave_arg) { slave= slave_arg; }
void move_node(st_select_lex_node *where_to_move)
@@ -938,6 +938,10 @@ public:
void init_query();
st_select_lex* outer_select();
+ const st_select_lex* first_select() const
+ {
+ return reinterpret_cast<const st_select_lex*>(slave);
+ }
st_select_lex* first_select()
{
return reinterpret_cast<st_select_lex*>(slave);
@@ -1287,6 +1291,8 @@ public:
/* it is for correct printing SELECT options */
thr_lock_type lock_type;
+ List<List_item> save_many_values;
+ List<Item> *save_insert_list;
table_value_constr *tvc;
bool in_tvc;
@@ -1334,7 +1340,8 @@ public:
}
inline bool is_subquery_function() { return master_unit()->item != 0; }
- bool mark_as_dependent(THD *thd, st_select_lex *last, Item *dependency);
+ bool mark_as_dependent(THD *thd, st_select_lex *last,
+ Item_ident *dependency);
void set_braces(bool value)
{
@@ -1470,6 +1477,8 @@ public:
bool save_leaf_tables(THD *thd);
bool save_prep_leaf_tables(THD *thd);
+ void set_unique_exclude();
+
bool is_merged_child_of(st_select_lex *ancestor);
/*
@@ -3078,7 +3087,8 @@ public:
struct LEX: public Query_tables_list
{
SELECT_LEX_UNIT unit; /* most upper unit */
- inline SELECT_LEX *first_select_lex() {return unit.first_select();}
+ SELECT_LEX *first_select_lex() { return unit.first_select(); }
+ const SELECT_LEX *first_select_lex() const { return unit.first_select(); }
private:
SELECT_LEX builtin_select;
@@ -4342,6 +4352,25 @@ public:
return false;
}
+ bool create_like() const
+ {
+ DBUG_ASSERT(!create_info.like() ||
+ !first_select_lex()->item_list.elements);
+ return create_info.like();
+ }
+
+ bool create_select() const
+ {
+ DBUG_ASSERT(!create_info.like() ||
+ !first_select_lex()->item_list.elements);
+ return first_select_lex()->item_list.elements;
+ }
+
+ bool create_simple() const
+ {
+ return !create_like() && !create_select();
+ }
+
SELECT_LEX *exclude_last_select();
SELECT_LEX *exclude_not_first_select(SELECT_LEX *exclude);
void check_automatic_up(enum sub_select_type type);
@@ -4409,13 +4438,6 @@ public:
return false;
}
- void tvc_start()
- {
- field_list.empty();
- many_values.empty();
- insert_list= 0;
- }
-
SELECT_LEX_UNIT *alloc_unit();
SELECT_LEX *alloc_select(bool is_select);
SELECT_LEX_UNIT *create_unit(SELECT_LEX*);
@@ -4470,6 +4492,8 @@ public:
bool distinct);
SELECT_LEX *parsed_subselect(SELECT_LEX_UNIT *unit);
bool parsed_insert_select(SELECT_LEX *firs_select);
+ void save_values_list_state();
+ void restore_values_list_state();
bool parsed_TVC_start();
SELECT_LEX *parsed_TVC_end();
TABLE_LIST *parsed_derived_table(SELECT_LEX_UNIT *unit,
diff --git a/sql/sql_manager.cc b/sql/sql_manager.cc
index 2ad8d8a914a..b08e43e8af2 100644
--- a/sql/sql_manager.cc
+++ b/sql/sql_manager.cc
@@ -26,8 +26,8 @@
#include "sql_manager.h"
#include "sql_base.h" // flush_tables
-static bool volatile manager_thread_in_use;
-static bool abort_manager;
+static bool volatile manager_thread_in_use = 0;
+static bool abort_manager = false;
pthread_t manager_thread;
mysql_mutex_t LOCK_manager;
@@ -35,31 +35,31 @@ mysql_cond_t COND_manager;
struct handler_cb {
struct handler_cb *next;
- void (*action)(void);
+ void (*action)(void *);
+ void *data;
};
-static struct handler_cb * volatile cb_list;
+static struct handler_cb *cb_list; // protected by LOCK_manager
-bool mysql_manager_submit(void (*action)())
+bool mysql_manager_submit(void (*action)(void *), void *data)
{
bool result= FALSE;
DBUG_ASSERT(manager_thread_in_use);
- struct handler_cb * volatile *cb;
+ struct handler_cb **cb;
mysql_mutex_lock(&LOCK_manager);
cb= &cb_list;
- while (*cb && (*cb)->action != action)
+ while (*cb)
cb= &(*cb)->next;
+ *cb= (struct handler_cb *)my_malloc(sizeof(struct handler_cb), MYF(MY_WME));
if (!*cb)
+ result= TRUE;
+ else
{
- *cb= (struct handler_cb *)my_malloc(sizeof(struct handler_cb), MYF(MY_WME));
- if (!*cb)
- result= TRUE;
- else
- {
- (*cb)->next= NULL;
- (*cb)->action= action;
- }
+ (*cb)->next= NULL;
+ (*cb)->action= action;
+ (*cb)->data= data;
}
+ mysql_cond_signal(&COND_manager);
mysql_mutex_unlock(&LOCK_manager);
return result;
}
@@ -69,18 +69,14 @@ pthread_handler_t handle_manager(void *arg __attribute__((unused)))
int error = 0;
struct timespec abstime;
bool reset_flush_time = TRUE;
- struct handler_cb *cb= NULL;
my_thread_init();
DBUG_ENTER("handle_manager");
pthread_detach_this_thread();
manager_thread = pthread_self();
- mysql_cond_init(key_COND_manager, &COND_manager,NULL);
- mysql_mutex_init(key_LOCK_manager, &LOCK_manager, NULL);
- manager_thread_in_use = 1;
- for (;;)
+ mysql_mutex_lock(&LOCK_manager);
+ while (!abort_manager)
{
- mysql_mutex_lock(&LOCK_manager);
/* XXX: This will need to be made more general to handle different
* polling needs. */
if (flush_time)
@@ -90,40 +86,37 @@ pthread_handler_t handle_manager(void *arg __attribute__((unused)))
set_timespec(abstime, flush_time);
reset_flush_time = FALSE;
}
- while ((!error || error == EINTR) && !abort_manager)
+ while ((!error || error == EINTR) && !abort_manager && !cb_list)
error= mysql_cond_timedwait(&COND_manager, &LOCK_manager, &abstime);
+
+ if (error == ETIMEDOUT || error == ETIME)
+ {
+ tc_purge();
+ error = 0;
+ reset_flush_time = TRUE;
+ }
}
else
{
- while ((!error || error == EINTR) && !abort_manager)
+ while ((!error || error == EINTR) && !abort_manager && !cb_list)
error= mysql_cond_wait(&COND_manager, &LOCK_manager);
}
- if (cb == NULL)
- {
- cb= cb_list;
- cb_list= NULL;
- }
- mysql_mutex_unlock(&LOCK_manager);
- if (abort_manager)
- break;
-
- if (error == ETIMEDOUT || error == ETIME)
- {
- tc_purge();
- error = 0;
- reset_flush_time = TRUE;
- }
+ struct handler_cb *cb= cb_list;
+ cb_list= NULL;
+ mysql_mutex_unlock(&LOCK_manager);
while (cb)
{
struct handler_cb *next= cb->next;
- cb->action();
+ cb->action(cb->data);
my_free(cb);
cb= next;
}
+ mysql_mutex_lock(&LOCK_manager);
}
manager_thread_in_use = 0;
+ mysql_mutex_unlock(&LOCK_manager);
mysql_mutex_destroy(&LOCK_manager);
mysql_cond_destroy(&COND_manager);
DBUG_LEAVE; // Can't use DBUG_RETURN after my_thread_end
@@ -137,15 +130,15 @@ void start_handle_manager()
{
DBUG_ENTER("start_handle_manager");
abort_manager = false;
- if (flush_time && flush_time != ~(ulong) 0L)
{
pthread_t hThread;
- int error;
- if ((error= mysql_thread_create(key_thread_handle_manager,
- &hThread, &connection_attrib,
- handle_manager, 0)))
- sql_print_warning("Can't create handle_manager thread (errno= %d)",
- error);
+ int err;
+ manager_thread_in_use = 1;
+ mysql_cond_init(key_COND_manager, &COND_manager,NULL);
+ mysql_mutex_init(key_LOCK_manager, &LOCK_manager, NULL);
+ if ((err= mysql_thread_create(key_thread_handle_manager, &hThread,
+ &connection_attrib, handle_manager, 0)))
+ sql_print_warning("Can't create handle_manager thread (errno: %M)", err);
}
DBUG_VOID_RETURN;
}
@@ -155,10 +148,10 @@ void start_handle_manager()
void stop_handle_manager()
{
DBUG_ENTER("stop_handle_manager");
- abort_manager = true;
if (manager_thread_in_use)
{
mysql_mutex_lock(&LOCK_manager);
+ abort_manager = true;
DBUG_PRINT("quit", ("initiate shutdown of handle manager thread: %lu",
(ulong)manager_thread));
mysql_cond_signal(&COND_manager);
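
The reworked sql_manager.cc turns the manager into a plain producer/consumer: mysql_manager_submit() now queues a (function, argument) pair and signals COND_manager, while handle_manager() holds LOCK_manager across the wait, takes the whole list, and runs the callbacks with the mutex released. A small self-contained sketch of that loop using standard-library primitives instead of the server's wrappers (names are illustrative):

    #include <condition_variable>
    #include <cstdio>
    #include <mutex>
    #include <thread>
    #include <vector>

    struct Callback { void (*action)(void *); void *data; };

    static std::mutex              lock;     // plays the role of LOCK_manager
    static std::condition_variable cond;     // plays the role of COND_manager
    static std::vector<Callback>   queue;    // plays the role of cb_list
    static bool                    abort_manager= false;

    static void submit(void (*action)(void *), void *data)
    {
      std::lock_guard<std::mutex> guard(lock);
      queue.push_back({action, data});
      cond.notify_one();                      // wake the manager immediately
    }

    static void manager_thread()
    {
      std::unique_lock<std::mutex> guard(lock);
      for (;;)
      {
        cond.wait(guard, []{ return abort_manager || !queue.empty(); });
        std::vector<Callback> work;
        work.swap(queue);                     // grab the whole list under the lock
        guard.unlock();                       // run callbacks without the lock held
        for (const Callback &cb : work)
          cb.action(cb.data);
        guard.lock();
        if (abort_manager && queue.empty())
          break;
      }
    }

    static void print_msg(void *data)
    { std::printf("callback: %s\n", static_cast<const char *>(data)); }

    int main()
    {
      std::thread t(manager_thread);
      submit(print_msg, const_cast<char *>("purge"));
      {
        std::lock_guard<std::mutex> guard(lock);
        abort_manager= true;
        cond.notify_one();
      }
      t.join();
      return 0;
    }
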
diff --git a/sql/sql_manager.h b/sql/sql_manager.h
index 9c6c84450ed..f97d4a2cfc5 100644
--- a/sql/sql_manager.h
+++ b/sql/sql_manager.h
@@ -18,6 +18,6 @@
void start_handle_manager();
void stop_handle_manager();
-bool mysql_manager_submit(void (*action)());
+bool mysql_manager_submit(void (*action)(void *), void *data);
#endif /* SQL_MANAGER_INCLUDED */
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index 036c1215ea6..3ae7c7c7df3 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -1,5 +1,5 @@
/* Copyright (c) 2000, 2017, Oracle and/or its affiliates.
- Copyright (c) 2008, 2020, MariaDB
+ Copyright (c) 2008, 2021, MariaDB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -1164,6 +1164,14 @@ static bool wsrep_tables_accessible_when_detached(const TABLE_LIST *tables)
}
return true;
}
+
+static bool wsrep_command_no_result(char command)
+{
+ return (command == COM_STMT_PREPARE ||
+ command == COM_STMT_FETCH ||
+ command == COM_STMT_SEND_LONG_DATA ||
+ command == COM_STMT_CLOSE);
+}
#endif /* WITH_WSREP */
#ifndef EMBEDDED_LIBRARY
@@ -1287,12 +1295,20 @@ bool do_command(THD *thd)
#ifdef WITH_WSREP
DEBUG_SYNC(thd, "wsrep_before_before_command");
/*
- Aborted by background rollbacker thread.
- Handle error here and jump straight to out
+ If this command does not return a result, then we
+ instruct wsrep_before_command() to skip result handling.
+    This causes a BF-aborted transaction to roll back but keeps
+    the error state until the next command that is able to return
+ a result to the client.
*/
- if (wsrep_before_command(thd))
+ if (wsrep_before_command(thd, wsrep_command_no_result(command)))
{
- thd->store_globals();
+ /*
+ Aborted by background rollbacker thread.
+ Handle error here and jump straight to out.
+ Notice that thd->store_globals() is called
+ in wsrep_before_command().
+ */
WSREP_LOG_THD(thd, "enter found BF aborted");
DBUG_ASSERT(!thd->mdl_context.has_locks());
DBUG_ASSERT(!thd->get_stmt_da()->is_set());
@@ -1625,7 +1641,9 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
if (unlikely(thd->security_ctx->password_expired &&
command != COM_QUERY &&
command != COM_PING &&
- command != COM_QUIT))
+ command != COM_QUIT &&
+ command != COM_STMT_PREPARE &&
+ command != COM_STMT_EXECUTE))
{
my_error(ER_MUST_CHANGE_PASSWORD, MYF(0));
goto dispatch_end;
@@ -2204,6 +2222,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
break;
general_log_print(thd, command, NullS);
status_var_increment(thd->status_var.com_stat[SQLCOM_SHOW_STATUS]);
+ *current_global_status_var= global_status_var;
calc_sum_of_all_status(current_global_status_var);
if (!(uptime= (ulong) (thd->start_time - server_start_time)))
queries_per_second1000= 0;
@@ -2384,12 +2403,7 @@ dispatch_end:
WSREP_DEBUG("THD is killed at dispatch_end");
}
wsrep_after_command_before_result(thd);
- if (wsrep_current_error(thd) &&
- !(command == COM_STMT_PREPARE ||
- command == COM_STMT_FETCH ||
- command == COM_STMT_SEND_LONG_DATA ||
- command == COM_STMT_CLOSE
- ))
+ if (wsrep_current_error(thd) && !wsrep_command_no_result(command))
{
/* todo: Pass wsrep client state current error to override */
wsrep_override_error(thd, wsrep_current_error(thd),
@@ -3294,6 +3308,146 @@ bool Sql_cmd_call::execute(THD *thd)
/**
+  Check whether the SQL statement being processed is prefixed with a
+  SET STATEMENT clause and handle the variable assignments if it is.
+
+ @param thd thread handle
+ @param lex current lex
+
+ @return false in case of success, true in case of error.
+*/
+
+bool run_set_statement_if_requested(THD *thd, LEX *lex)
+{
+ if (!lex->stmt_var_list.is_empty() && !thd->slave_thread)
+ {
+ Query_arena backup;
+ DBUG_PRINT("info", ("SET STATEMENT %d vars", lex->stmt_var_list.elements));
+
+ lex->old_var_list.empty();
+ List_iterator_fast<set_var_base> it(lex->stmt_var_list);
+ set_var_base *var;
+
+ if (lex->set_arena_for_set_stmt(&backup))
+ return true;
+
+ MEM_ROOT *mem_root= thd->mem_root;
+ while ((var= it++))
+ {
+ DBUG_ASSERT(var->is_system());
+ set_var *o= NULL, *v= (set_var*)var;
+ if (!v->var->is_set_stmt_ok())
+ {
+ my_error(ER_SET_STATEMENT_NOT_SUPPORTED, MYF(0), v->var->name.str);
+ lex->reset_arena_for_set_stmt(&backup);
+ lex->old_var_list.empty();
+ lex->free_arena_for_set_stmt();
+ return true;
+ }
+ if (v->var->session_is_default(thd))
+ o= new set_var(thd,v->type, v->var, &v->base, NULL);
+ else
+ {
+ switch (v->var->option.var_type & GET_TYPE_MASK)
+ {
+ case GET_BOOL:
+ case GET_INT:
+ case GET_LONG:
+ case GET_LL:
+ {
+ bool null_value;
+ longlong val= v->var->val_int(&null_value, thd, v->type, &v->base);
+ o= new set_var(thd, v->type, v->var, &v->base,
+ (null_value ?
+ (Item *) new (mem_root) Item_null(thd) :
+ (Item *) new (mem_root) Item_int(thd, val)));
+ }
+ break;
+ case GET_UINT:
+ case GET_ULONG:
+ case GET_ULL:
+ {
+ bool null_value;
+ ulonglong val= v->var->val_int(&null_value, thd, v->type, &v->base);
+ o= new set_var(thd, v->type, v->var, &v->base,
+ (null_value ?
+ (Item *) new (mem_root) Item_null(thd) :
+ (Item *) new (mem_root) Item_uint(thd, val)));
+ }
+ break;
+ case GET_DOUBLE:
+ {
+ bool null_value;
+ double val= v->var->val_real(&null_value, thd, v->type, &v->base);
+ o= new set_var(thd, v->type, v->var, &v->base,
+ (null_value ?
+ (Item *) new (mem_root) Item_null(thd) :
+ (Item *) new (mem_root) Item_float(thd, val, 1)));
+ }
+ break;
+ default:
+ case GET_NO_ARG:
+ case GET_DISABLED:
+ DBUG_ASSERT(0);
+ /* fall through */
+ case 0:
+ case GET_FLAGSET:
+ case GET_ENUM:
+ case GET_SET:
+ case GET_STR:
+ case GET_STR_ALLOC:
+ {
+ char buff[STRING_BUFFER_USUAL_SIZE];
+ String tmp(buff, sizeof(buff), v->var->charset(thd)),*val;
+ val= v->var->val_str(&tmp, thd, v->type, &v->base);
+ if (val)
+ {
+ Item_string *str=
+ new (mem_root) Item_string(thd, v->var->charset(thd),
+ val->ptr(), val->length());
+ o= new set_var(thd, v->type, v->var, &v->base, str);
+ }
+ else
+ o= new set_var(thd, v->type, v->var, &v->base,
+ new (mem_root) Item_null(thd));
+ }
+ break;
+ }
+ }
+ DBUG_ASSERT(o);
+ lex->old_var_list.push_back(o, thd->mem_root);
+ }
+ lex->reset_arena_for_set_stmt(&backup);
+
+ if (lex->old_var_list.is_empty())
+ lex->free_arena_for_set_stmt();
+
+ if (thd->is_error() ||
+ sql_set_variables(thd, &lex->stmt_var_list, false))
+ {
+ if (!thd->is_error())
+ my_error(ER_WRONG_ARGUMENTS, MYF(0), "SET");
+ lex->restore_set_statement_var();
+ return true;
+ }
+ /*
+ The value of last_insert_id is remembered in THD to be written to binlog
+ when it's used *the first time* in the statement. But SET STATEMENT
+ must read the old value of last_insert_id to be able to restore it at
+      the end. This should not count as "reading of last_insert_id" and
+ should not remember last_insert_id for binlog. That is, it should clear
+ stmt_depends_on_first_successful_insert_id_in_prev_stmt flag.
+ */
+ if (!thd->in_sub_stmt)
+ {
+ thd->stmt_depends_on_first_successful_insert_id_in_prev_stmt= 0;
+ }
+ }
+ return false;
+}
+
+
+/**
Execute command saved in thd and lex->sql_command.
@param thd Thread handle
@@ -3338,7 +3492,10 @@ mysql_execute_command(THD *thd)
first_table->for_insert_data);
if (thd->security_ctx->password_expired &&
- lex->sql_command != SQLCOM_SET_OPTION)
+ lex->sql_command != SQLCOM_SET_OPTION &&
+ lex->sql_command != SQLCOM_PREPARE &&
+ lex->sql_command != SQLCOM_EXECUTE &&
+ lex->sql_command != SQLCOM_DEALLOCATE_PREPARE)
{
my_error(ER_MUST_CHANGE_PASSWORD, MYF(0));
DBUG_RETURN(1);
@@ -3521,6 +3678,11 @@ mysql_execute_command(THD *thd)
Json_writer_object trace_command(thd);
Json_writer_array trace_command_steps(thd, "steps");
+ /* store old value of binlog format */
+ enum_binlog_format orig_binlog_format,orig_current_stmt_binlog_format;
+
+ thd->get_binlog_format(&orig_binlog_format,
+ &orig_current_stmt_binlog_format);
#ifdef WITH_WSREP
if (WSREP(thd))
{
@@ -3572,133 +3734,13 @@ mysql_execute_command(THD *thd)
DBUG_ASSERT(thd->transaction.stmt.modified_non_trans_table == FALSE);
- /* store old value of binlog format */
- enum_binlog_format orig_binlog_format,orig_current_stmt_binlog_format;
-
- thd->get_binlog_format(&orig_binlog_format,
- &orig_current_stmt_binlog_format);
-
- if (!lex->stmt_var_list.is_empty() && !thd->slave_thread)
- {
- Query_arena backup;
- DBUG_PRINT("info", ("SET STATEMENT %d vars", lex->stmt_var_list.elements));
-
- lex->old_var_list.empty();
- List_iterator_fast<set_var_base> it(lex->stmt_var_list);
- set_var_base *var;
-
- if (lex->set_arena_for_set_stmt(&backup))
- goto error;
-
- MEM_ROOT *mem_root= thd->mem_root;
- while ((var= it++))
- {
- DBUG_ASSERT(var->is_system());
- set_var *o= NULL, *v= (set_var*)var;
- if (!v->var->is_set_stmt_ok())
- {
- my_error(ER_SET_STATEMENT_NOT_SUPPORTED, MYF(0), v->var->name.str);
- lex->reset_arena_for_set_stmt(&backup);
- lex->old_var_list.empty();
- lex->free_arena_for_set_stmt();
- goto error;
- }
- if (v->var->session_is_default(thd))
- o= new set_var(thd,v->type, v->var, &v->base, NULL);
- else
- {
- switch (v->var->option.var_type & GET_TYPE_MASK)
- {
- case GET_BOOL:
- case GET_INT:
- case GET_LONG:
- case GET_LL:
- {
- bool null_value;
- longlong val= v->var->val_int(&null_value, thd, v->type, &v->base);
- o= new set_var(thd, v->type, v->var, &v->base,
- (null_value ?
- (Item *) new (mem_root) Item_null(thd) :
- (Item *) new (mem_root) Item_int(thd, val)));
- }
- break;
- case GET_UINT:
- case GET_ULONG:
- case GET_ULL:
- {
- bool null_value;
- ulonglong val= v->var->val_int(&null_value, thd, v->type, &v->base);
- o= new set_var(thd, v->type, v->var, &v->base,
- (null_value ?
- (Item *) new (mem_root) Item_null(thd) :
- (Item *) new (mem_root) Item_uint(thd, val)));
- }
- break;
- case GET_DOUBLE:
- {
- bool null_value;
- double val= v->var->val_real(&null_value, thd, v->type, &v->base);
- o= new set_var(thd, v->type, v->var, &v->base,
- (null_value ?
- (Item *) new (mem_root) Item_null(thd) :
- (Item *) new (mem_root) Item_float(thd, val, 1)));
- }
- break;
- default:
- case GET_NO_ARG:
- case GET_DISABLED:
- DBUG_ASSERT(0);
- /* fall through */
- case 0:
- case GET_FLAGSET:
- case GET_ENUM:
- case GET_SET:
- case GET_STR:
- case GET_STR_ALLOC:
- {
- char buff[STRING_BUFFER_USUAL_SIZE];
- String tmp(buff, sizeof(buff), v->var->charset(thd)),*val;
- val= v->var->val_str(&tmp, thd, v->type, &v->base);
- if (val)
- {
- Item_string *str= new (mem_root) Item_string(thd, v->var->charset(thd),
- val->ptr(), val->length());
- o= new set_var(thd, v->type, v->var, &v->base, str);
- }
- else
- o= new set_var(thd, v->type, v->var, &v->base,
- new (mem_root) Item_null(thd));
- }
- break;
- }
- }
- DBUG_ASSERT(o);
- lex->old_var_list.push_back(o, thd->mem_root);
- }
- lex->reset_arena_for_set_stmt(&backup);
- if (lex->old_var_list.is_empty())
- lex->free_arena_for_set_stmt();
- if (thd->is_error() ||
- (res= sql_set_variables(thd, &lex->stmt_var_list, false)))
- {
- if (!thd->is_error())
- my_error(ER_WRONG_ARGUMENTS, MYF(0), "SET");
- lex->restore_set_statement_var();
- goto error;
- }
- /*
- The value of last_insert_id is remembered in THD to be written to binlog
- when it's used *the first time* in the statement. But SET STATEMENT
- must read the old value of last_insert_id to be able to restore it at
- the end. This should not count at "reading of last_insert_id" and
- should not remember last_insert_id for binlog. That is, it should clear
- stmt_depends_on_first_successful_insert_id_in_prev_stmt flag.
- */
- if (!thd->in_sub_stmt)
- {
- thd->stmt_depends_on_first_successful_insert_id_in_prev_stmt= 0;
- }
- }
+ /*
+ Assign system variables with values specified by the clause
+ SET STATEMENT var1=value1 [, var2=value2, ...] FOR <statement>
+    if there are any.
+ */
+ if (run_set_statement_if_requested(thd, lex))
+ goto error;
if (thd->lex->mi.connection_name.str == NULL)
thd->lex->mi.connection_name= thd->variables.default_master_connection;
@@ -3744,7 +3786,8 @@ mysql_execute_command(THD *thd)
thd->transaction.stmt.mark_trans_did_ddl();
#ifdef WITH_WSREP
/* Clean up the previous transaction on implicit commit */
- if (wsrep_thd_is_local(thd) && wsrep_after_statement(thd))
+ if (WSREP_NNULL(thd) && wsrep_thd_is_local(thd) &&
+ wsrep_after_statement(thd))
{
goto error;
}
@@ -3818,7 +3861,7 @@ mysql_execute_command(THD *thd)
Do not start transaction for stored procedures, it will be handled
internally in SP processing.
*/
- if (WSREP(thd) &&
+ if (WSREP_NNULL(thd) &&
wsrep_thd_is_local(thd) &&
lex->sql_command != SQLCOM_BEGIN &&
lex->sql_command != SQLCOM_CALL &&
@@ -5987,6 +6030,14 @@ mysql_execute_command(THD *thd)
break;
}
case SQLCOM_XA_START:
+#ifdef WITH_WSREP
+ if (WSREP(thd))
+ {
+ my_error(ER_NOT_SUPPORTED_YET, MYF(0),
+ "XA transactions with Galera replication");
+ break;
+ }
+#endif /* WITH_WSREP */
if (trans_xa_start(thd))
goto error;
my_ok(thd);
@@ -6797,6 +6848,9 @@ check_access(THD *thd, ulong want_access, const char *db, ulong *save_priv,
bool check_single_table_access(THD *thd, ulong privilege, TABLE_LIST *tables,
bool no_errors)
{
+ if (tables->derived)
+ return 0;
+
Switch_to_definer_security_ctx backup_sctx(thd, tables);
const char *db_name;
@@ -7503,9 +7557,14 @@ void THD::reset_for_next_command(bool do_clear_error)
save_prep_leaf_list= false;
- DBUG_PRINT("debug",
- ("is_current_stmt_binlog_format_row(): %d",
- is_current_stmt_binlog_format_row()));
+#ifdef WITH_WSREP
+#if !defined(DBUG_OFF)
+ if (mysql_bin_log.is_open())
+#endif
+#endif
+ DBUG_PRINT("debug",
+ ("is_current_stmt_binlog_format_row(): %d",
+ is_current_stmt_binlog_format_row()));
DBUG_VOID_RETURN;
}
@@ -8934,7 +8993,13 @@ push_new_name_resolution_context(THD *thd,
left_op->first_leaf_for_name_resolution();
on_context->last_name_resolution_table=
right_op->last_leaf_for_name_resolution();
- return thd->lex->push_context(on_context);
+ LEX *lex= thd->lex;
+ on_context->select_lex = lex->current_select;
+ st_select_lex *curr_select= lex->pop_select();
+ st_select_lex *outer_sel= lex->select_stack_head();
+ lex->push_select(curr_select);
+ on_context->outer_context = outer_sel ? &outer_sel->context : 0;
+ return lex->push_context(on_context);
}
@@ -9056,10 +9121,9 @@ struct find_thread_callback_arg
};
-my_bool find_thread_callback(THD *thd, find_thread_callback_arg *arg)
+static my_bool find_thread_callback(THD *thd, find_thread_callback_arg *arg)
{
- if (thd->get_command() != COM_DAEMON &&
- arg->id == (arg->query_id ? thd->query_id : (longlong) thd->thread_id))
+ if (arg->id == (arg->query_id ? thd->query_id : (longlong) thd->thread_id))
{
mysql_mutex_lock(&thd->LOCK_thd_kill); // Lock from delete
arg->thd= thd;
@@ -9076,27 +9140,6 @@ THD *find_thread_by_id(longlong id, bool query_id)
return arg.thd;
}
-#ifdef WITH_WSREP
-my_bool find_thread_with_thd_data_lock_callback(THD *thd, find_thread_callback_arg *arg)
-{
- if (thd->get_command() != COM_DAEMON &&
- arg->id == (arg->query_id ? thd->query_id : (longlong) thd->thread_id))
- {
- if (WSREP(thd)) mysql_mutex_lock(&thd->LOCK_thd_data);
- mysql_mutex_lock(&thd->LOCK_thd_kill); // Lock from delete
- arg->thd= thd;
- return 1;
- }
- return 0;
-}
-THD *find_thread_by_id_with_thd_data_lock(longlong id, bool query_id)
-{
- find_thread_callback_arg arg(id, query_id);
- server_threads.iterate(find_thread_with_thd_data_lock_callback, &arg);
- return arg.thd;
-}
-#endif
-
/**
kill one thread.
@@ -9113,11 +9156,11 @@ kill_one_thread(THD *thd, longlong id, killed_state kill_signal, killed_type typ
uint error= (type == KILL_TYPE_QUERY ? ER_NO_SUCH_QUERY : ER_NO_SUCH_THREAD);
DBUG_ENTER("kill_one_thread");
DBUG_PRINT("enter", ("id: %lld signal: %u", id, (uint) kill_signal));
-#ifdef WITH_WSREP
- if (id && (tmp= find_thread_by_id_with_thd_data_lock(id, type == KILL_TYPE_QUERY)))
-#else
- if (id && (tmp= find_thread_by_id(id, type == KILL_TYPE_QUERY)))
-#endif
+ tmp= find_thread_by_id(id, type == KILL_TYPE_QUERY);
+ if (!tmp)
+ DBUG_RETURN(error);
+
+ if (tmp->get_command() != COM_DAEMON)
{
/*
If we're SUPER, we can KILL anything, including system-threads.
@@ -9140,6 +9183,7 @@ kill_one_thread(THD *thd, longlong id, killed_state kill_signal, killed_type typ
faster and do a harder kill than KILL_SYSTEM_THREAD;
*/
+ mysql_mutex_lock(&tmp->LOCK_thd_data); // for various wsrep* checks below
#ifdef WITH_WSREP
if (((thd->security_ctx->master_access & SUPER_ACL) ||
thd->security_ctx->user_matches(tmp->security_ctx)) &&
@@ -9161,8 +9205,8 @@ kill_one_thread(THD *thd, longlong id, killed_state kill_signal, killed_type typ
else
#endif /* WITH_WSREP */
{
- WSREP_DEBUG("kill_one_thread %llu, victim: %llu wsrep_aborter %llu by signal %d",
- thd->thread_id, id, tmp->wsrep_aborter, kill_signal);
+ WSREP_DEBUG("kill_one_thread %llu, victim: %llu wsrep_aborter %llu by signal %d",
+ thd->thread_id, id, tmp->wsrep_aborter, kill_signal);
tmp->awake_no_mutex(kill_signal);
WSREP_DEBUG("victim: %llu taken care of", id);
error= 0;
@@ -9171,11 +9215,9 @@ kill_one_thread(THD *thd, longlong id, killed_state kill_signal, killed_type typ
else
error= (type == KILL_TYPE_QUERY ? ER_KILL_QUERY_DENIED_ERROR :
ER_KILL_DENIED_ERROR);
-#ifdef WITH_WSREP
- if (WSREP(tmp)) mysql_mutex_unlock(&tmp->LOCK_thd_data);
-#endif
- mysql_mutex_unlock(&tmp->LOCK_thd_kill);
+ mysql_mutex_unlock(&tmp->LOCK_thd_data);
}
+ mysql_mutex_unlock(&tmp->LOCK_thd_kill);
DBUG_PRINT("exit", ("%d", error));
DBUG_RETURN(error);
}
@@ -9221,8 +9263,8 @@ static my_bool kill_threads_callback(THD *thd, kill_threads_callback_arg *arg)
return 1;
if (!arg->threads_to_kill.push_back(thd, arg->thd->mem_root))
{
- if (WSREP(thd)) mysql_mutex_lock(&thd->LOCK_thd_data);
mysql_mutex_lock(&thd->LOCK_thd_kill); // Lock from delete
+ mysql_mutex_lock(&thd->LOCK_thd_data);
}
}
}
@@ -9265,7 +9307,7 @@ static uint kill_threads_for_user(THD *thd, LEX_USER *user,
*/
next_ptr= it2++;
mysql_mutex_unlock(&ptr->LOCK_thd_kill);
- if (WSREP(ptr)) mysql_mutex_unlock(&ptr->LOCK_thd_data);
+ mysql_mutex_unlock(&ptr->LOCK_thd_data);
(*rows)++;
} while ((ptr= next_ptr));
}
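
Extracting run_set_statement_if_requested() lets both mysql_execute_command() and the prepared-statement path apply SET STATEMENT overrides and later restore the session values. Stripped of the set_var machinery, the underlying pattern is save/override/restore; a hypothetical sketch over a plain variable map (not the server's implementation):

    #include <cstdio>
    #include <map>
    #include <string>

    using VarMap= std::map<std::string, long long>;

    // Apply per-statement overrides, remembering the old values so the caller
    // can restore them once the statement has finished (or failed).
    static VarMap apply_overrides(VarMap &session, const VarMap &overrides)
    {
      VarMap saved;
      for (const auto &kv : overrides)
      {
        saved[kv.first]= session[kv.first];   // remember the session value
        session[kv.first]= kv.second;         // install the statement-local value
      }
      return saved;
    }

    static void restore_overrides(VarMap &session, const VarMap &saved)
    {
      for (const auto &kv : saved)
        session[kv.first]= kv.second;
    }

    int main()
    {
      VarMap session{{"sort_buffer_size", 262144}};
      // Corresponds to: SET STATEMENT sort_buffer_size=8388608 FOR SELECT ...
      VarMap saved= apply_overrides(session, {{"sort_buffer_size", 8388608}});
      std::printf("during statement: %lld\n", session["sort_buffer_size"]);
      restore_overrides(session, saved);
      std::printf("after statement:  %lld\n", session["sort_buffer_size"]);
      return 0;
    }
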
diff --git a/sql/sql_parse.h b/sql/sql_parse.h
index 1d25b898ca4..be37e3f6bb3 100644
--- a/sql/sql_parse.h
+++ b/sql/sql_parse.h
@@ -100,6 +100,7 @@ void mysql_init_multi_delete(LEX *lex);
bool multi_delete_set_locks_and_link_aux_tables(LEX *lex);
void create_table_set_open_action_and_adjust_tables(LEX *lex);
int bootstrap(MYSQL_FILE *file);
+bool run_set_statement_if_requested(THD *thd, LEX *lex);
int mysql_execute_command(THD *thd);
bool do_command(THD *thd);
bool dispatch_command(enum enum_server_command command, THD *thd,
diff --git a/sql/sql_partition.cc b/sql/sql_partition.cc
index 7ed1eb7aa52..43264e3e508 100644
--- a/sql/sql_partition.cc
+++ b/sql/sql_partition.cc
@@ -1505,7 +1505,7 @@ static bool check_list_constants(THD *thd, partition_info *part_info)
List_iterator<part_elem_value> list_val_it2(part_def->list_val_list);
while ((list_value= list_val_it2++))
{
- calc_value= list_value->value - type_add;
+ calc_value= list_value->value ^ type_add;
part_info->list_array[list_index].list_value= calc_value;
part_info->list_array[list_index++].partition_id= i;
}
@@ -2388,6 +2388,8 @@ static int add_column_list_values(String *str, partition_info *part_info,
*/
if (create_info)
{
+ const Column_derived_attributes
+ derived_attr(create_info->default_table_charset);
Create_field *sql_field;
if (!(sql_field= get_sql_field(field_name,
@@ -2402,7 +2404,7 @@ static int add_column_list_values(String *str, partition_info *part_info,
&need_cs_check))
return 1;
if (need_cs_check)
- field_cs= get_sql_field_charset(sql_field, create_info);
+ field_cs= sql_field->explicit_or_derived_charset(&derived_attr);
else
field_cs= NULL;
}
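
The change from value - type_add to value ^ type_add in check_list_constants() appears aimed at the UBSAN builds added elsewhere in this merge: subtracting the sign-bit constant from a signed 64-bit value can overflow, which is undefined behaviour, while XOR gives the same result as the wrapped subtraction and keeps signed column values ordered correctly when the list array is compared as unsigned (the apparent intent of type_add). A standalone check of that sign-bit-flip trick (illustrative, not the partitioning code):

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // Flip the sign bit: after this transform, comparing the results as unsigned
    // 64-bit integers gives the same order as comparing the inputs as signed.
    static uint64_t to_sortable(int64_t v)
    {
      return static_cast<uint64_t>(v) ^ 0x8000000000000000ULL;
    }

    int main()
    {
      const int64_t values[]= {INT64_MIN, -5, -1, 0, 1, 42, INT64_MAX};
      for (unsigned i= 1; i < sizeof(values) / sizeof(values[0]); i++)
      {
        assert(values[i - 1] < values[i]);                           // signed order
        assert(to_sortable(values[i - 1]) < to_sortable(values[i])); // unsigned order
      }
      std::puts("sign-bit flip preserves ordering");
      return 0;
    }
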
diff --git a/sql/sql_plugin.h b/sql/sql_plugin.h
index c9c75d07a6e..eb5532f5d59 100644
--- a/sql/sql_plugin.h
+++ b/sql/sql_plugin.h
@@ -22,10 +22,11 @@
that is defined in plugin.h
*/
#define SHOW_always_last SHOW_KEY_CACHE_LONG, \
- SHOW_LONG_STATUS, SHOW_DOUBLE_STATUS, \
SHOW_HAVE, SHOW_MY_BOOL, SHOW_HA_ROWS, SHOW_SYS, \
- SHOW_LONG_NOFLUSH, SHOW_LONGLONG_STATUS, SHOW_UINT32_STATUS, \
- SHOW_LEX_STRING, SHOW_ATOMIC_COUNTER_UINT32_T
+ SHOW_LONG_NOFLUSH, SHOW_LEX_STRING, SHOW_ATOMIC_COUNTER_UINT32_T, \
+ /* SHOW_*_STATUS must be at the end, SHOW_LONG_STATUS being first */ \
+ SHOW_LONG_STATUS, SHOW_DOUBLE_STATUS, SHOW_LONGLONG_STATUS, \
+ SHOW_UINT32_STATUS
#include "mariadb.h"
#undef SHOW_always_last
@@ -204,4 +205,3 @@ extern void sync_dynamic_session_variables(THD* thd, bool global_lock);
extern void wsrep_plugins_pre_init();
extern void wsrep_plugins_post_init();
#endif /* WITH_WSREP */
-
diff --git a/sql/sql_plugin_services.ic b/sql/sql_plugin_services.ic
index 69e57de5c8b..740569dc76e 100644
--- a/sql/sql_plugin_services.ic
+++ b/sql/sql_plugin_services.ic
@@ -155,7 +155,6 @@ static struct wsrep_service_st wsrep_handler = {
wsrep_thd_retry_counter,
wsrep_thd_ignore_table,
wsrep_thd_trx_seqno,
- wsrep_thd_auto_increment_variables,
wsrep_thd_is_aborting,
wsrep_set_data_home_dir,
wsrep_thd_is_BF,
@@ -175,7 +174,9 @@ static struct wsrep_service_st wsrep_handler = {
wsrep_commit_ordered,
wsrep_thd_is_applying,
wsrep_thd_set_wsrep_aborter,
- wsrep_report_bf_lock_wait
+ wsrep_report_bf_lock_wait,
+ wsrep_thd_kill_LOCK,
+ wsrep_thd_kill_UNLOCK
};
static struct thd_specifics_service_st thd_specifics_handler=
@@ -245,4 +246,3 @@ static struct st_service_ref list_of_services[]=
{ "wsrep_service", VERSION_wsrep, &wsrep_handler },
{ "json_service", VERSION_json, &json_handler }
};
-
diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc
index ee504028074..f0b8fc7309e 100644
--- a/sql/sql_prepare.cc
+++ b/sql/sql_prepare.cc
@@ -1,5 +1,5 @@
/* Copyright (c) 2002, 2015, Oracle and/or its affiliates.
- Copyright (c) 2008, 2019, MariaDB
+ Copyright (c) 2008, 2021, MariaDB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -118,8 +118,11 @@ When one supplies long data for a placeholder:
#include <mysql.h>
#else
#include <mysql_com.h>
+/* Constants defining bits in parameter type flags. Flags are read from high byte of short value */
+static const uint PARAMETER_FLAG_UNSIGNED= 128U << 8;
#endif
#include "lock.h" // MYSQL_OPEN_FORCE_SHARED_MDL
+#include "log_event.h" // class Log_event
#include "sql_handler.h"
#include "transaction.h" // trans_rollback_implicit
#ifdef WITH_WSREP
@@ -127,9 +130,6 @@ When one supplies long data for a placeholder:
#include "wsrep_trans_observer.h"
#endif /* WITH_WSREP */
-/* Constants defining bits in parameter type flags. Flags are read from high byte of short value */
-static const uint PARAMETER_FLAG_UNSIGNED = 128U << 8;
-
/**
A result class used to send cursor rows using the binary protocol.
*/
@@ -2488,6 +2488,16 @@ static bool check_prepared_statement(Prepared_statement *stmt)
DBUG_RETURN(FALSE);
}
break;
+ case SQLCOM_SHOW_BINLOG_EVENTS:
+ case SQLCOM_SHOW_RELAYLOG_EVENTS:
+ {
+ List<Item> field_list;
+ Log_event::init_show_field_list(thd, &field_list);
+
+ if ((res= send_stmt_metadata(thd, stmt, &field_list)) == 2)
+ DBUG_RETURN(FALSE);
+ }
+ break;
#endif /* EMBEDDED_LIBRARY */
case SQLCOM_SHOW_CREATE_PROC:
if ((res= mysql_test_show_create_routine(stmt, &sp_handler_procedure)) == 2)
@@ -4207,6 +4217,15 @@ bool Prepared_statement::prepare(const char *packet, uint packet_len)
thd->is_error() ||
init_param_array(this));
+ if (thd->security_ctx->password_expired &&
+ lex->sql_command != SQLCOM_SET_OPTION)
+ {
+ thd->restore_backup_statement(this, &stmt_backup);
+ thd->restore_active_arena(this, &stmt_backup);
+ thd->stmt_arena= old_stmt_arena;
+ my_error(ER_MUST_CHANGE_PASSWORD, MYF(0));
+ DBUG_RETURN(true);
+ }
lex->set_trg_event_type_for_tables();
/*
@@ -4231,6 +4250,16 @@ bool Prepared_statement::prepare(const char *packet, uint packet_len)
*/
MDL_savepoint mdl_savepoint= thd->mdl_context.mdl_savepoint();
+ /*
+ Set variables specified by
+ SET STATEMENT var1=value1 [, var2=value2, ...] FOR <statement>
+    clause for the duration of the prepare phase. The original values of the
+    variables listed in the SET STATEMENT clause are restored right after
+    returning from check_prepared_statement()
+ */
+ if (likely(error == 0))
+ error= run_set_statement_if_requested(thd, lex);
+
/*
The only case where we should have items in the thd->free_list is
after stmt->set_params_from_vars(), which may in some cases create
@@ -4249,6 +4278,12 @@ bool Prepared_statement::prepare(const char *packet, uint packet_len)
lex->context_analysis_only&= ~CONTEXT_ANALYSIS_ONLY_PREPARE;
}
+ /*
+ Restore original values of variables modified on handling
+ SET STATEMENT clause.
+ */
+ thd->lex->restore_set_statement_var();
+
/* The order is important */
lex->unit.cleanup();
diff --git a/sql/sql_reload.cc b/sql/sql_reload.cc
index 5b4600ece9a..76fb9819fd5 100644
--- a/sql/sql_reload.cc
+++ b/sql/sql_reload.cc
@@ -416,6 +416,14 @@ bool reload_acl_and_cache(THD *thd, unsigned long long options,
{
if (reinit_ssl())
result= 1;
+#ifdef WITH_WSREP
+ if (!result &&
+ WSREP_ON && wsrep_reload_ssl())
+ {
+ my_message(ER_UNKNOWN_ERROR, "Failed to refresh WSREP SSL.", MYF(0));
+ result= 1;
+ }
+#endif
}
if (options & REFRESH_GENERIC)
{
diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc
index 5203e0f52a5..6a6cfb2aa5f 100644
--- a/sql/sql_repl.cc
+++ b/sql/sql_repl.cc
@@ -3474,8 +3474,8 @@ static my_bool kill_callback(THD *thd, kill_callback_arg *arg)
thd->variables.server_id == arg->slave_server_id)
{
arg->thd= thd;
- if (WSREP(thd)) mysql_mutex_lock(&thd->LOCK_thd_data);
mysql_mutex_lock(&thd->LOCK_thd_kill); // Lock from delete
+ mysql_mutex_lock(&thd->LOCK_thd_data);
return 1;
}
return 0;
@@ -3496,7 +3496,7 @@ void kill_zombie_dump_threads(uint32 slave_server_id)
*/
arg.thd->awake_no_mutex(KILL_SLAVE_SAME_ID);
mysql_mutex_unlock(&arg.thd->LOCK_thd_kill);
- if (WSREP(arg.thd)) mysql_mutex_unlock(&arg.thd->LOCK_thd_data);
+ mysql_mutex_unlock(&arg.thd->LOCK_thd_data);
}
}
@@ -4685,5 +4685,22 @@ rpl_gtid_pos_update(THD *thd, char *str, size_t len)
return false;
}
+int compare_log_name(const char *log_1, const char *log_2) {
+ int res= 1;
+ const char *ext1_str= strrchr(log_1, '.');
+ const char *ext2_str= strrchr(log_2, '.');
+ char file_name_1[255], file_name_2[255];
+ strmake(file_name_1, log_1, (ext1_str - log_1));
+ strmake(file_name_2, log_2, (ext2_str - log_2));
+ char *endptr = NULL;
+ res= strcmp(file_name_1, file_name_2);
+ if (!res)
+ {
+ ulong ext1= strtoul(++ext1_str, &endptr, 10);
+ ulong ext2= strtoul(++ext2_str, &endptr, 10);
+ res= (ext1 > ext2 ? 1 : ((ext1 == ext2) ? 0 : -1));
+ }
+ return res;
+}
#endif /* HAVE_REPLICATION */
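
compare_log_name() above orders binlog names by base name and then by the numeric value of the extension, so mysql-bin.1000000 sorts after mysql-bin.999999 even though a plain string comparison would put it first. A simplified standalone version of the same idea (not the server function; it assumes both names contain a '.'):

    #include <cassert>
    #include <cstdlib>
    #include <cstring>
    #include <string>

    // Compare "<base>.<number>" log names: base lexicographically, then the
    // extension numerically. Assumes both names contain a '.' separator.
    static int compare_log_name_sketch(const char *a, const char *b)
    {
      const char *ext_a= std::strrchr(a, '.');
      const char *ext_b= std::strrchr(b, '.');
      int res= std::string(a, ext_a - a).compare(std::string(b, ext_b - b));
      if (res)
        return res < 0 ? -1 : 1;
      unsigned long na= std::strtoul(ext_a + 1, nullptr, 10);
      unsigned long nb= std::strtoul(ext_b + 1, nullptr, 10);
      return na > nb ? 1 : (na == nb ? 0 : -1);
    }

    int main()
    {
      // Numeric comparison of the extension handles a rollover in digit count.
      assert(compare_log_name_sketch("mysql-bin.999999", "mysql-bin.1000000") < 0);
      assert(compare_log_name_sketch("mysql-bin.000003", "mysql-bin.000003") == 0);
      assert(compare_log_name_sketch("relay.000002", "mysql-bin.000002") != 0);
      return 0;
    }
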
diff --git a/sql/sql_repl.h b/sql/sql_repl.h
index 18aa7ea3fce..95916e31abf 100644
--- a/sql/sql_repl.h
+++ b/sql/sql_repl.h
@@ -45,6 +45,7 @@ bool show_binlogs(THD* thd);
extern int init_master_info(Master_info* mi);
void kill_zombie_dump_threads(uint32 slave_server_id);
int check_binlog_magic(IO_CACHE* log, const char** errmsg);
+int compare_log_name(const char *log_1, const char *log_2);
struct LOAD_FILE_IO_CACHE : public IO_CACHE
{
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 9671880f1e0..52abaf29d05 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -1,5 +1,5 @@
/* Copyright (c) 2000, 2016, Oracle and/or its affiliates.
- Copyright (c) 2009, 2020, MariaDB Corporation.
+ Copyright (c) 2009, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -300,6 +300,8 @@ void set_postjoin_aggr_write_func(JOIN_TAB *tab);
static Item **get_sargable_cond(JOIN *join, TABLE *table);
+bool is_eq_cond_injected_for_split_opt(Item_func_eq *eq_item);
+
#ifndef DBUG_OFF
/*
@@ -348,7 +350,31 @@ bool dbug_user_var_equals_int(THD *thd, const char *name, int value)
}
return FALSE;
}
-#endif
+#endif /* DBUG_OFF */
+
+/*
+  Initialize the POSITION structure.
+*/
+
+POSITION::POSITION()
+{
+ table= 0;
+ records_read= cond_selectivity= read_time= 0.0;
+ prefix_record_count= 0.0;
+ key= 0;
+ use_join_buffer= 0;
+ sj_strategy= SJ_OPT_NONE;
+ n_sj_tables= 0;
+ spl_plan= 0;
+ range_rowid_filter_info= 0;
+ ref_depend_map= dups_producing_tables= 0;
+ inner_tables_handled_with_other_sjs= 0;
+ dups_weedout_picker.set_empty();
+ firstmatch_picker.set_empty();
+ loosescan_picker.set_empty();
+ sjmat_picker.set_empty();
+}
+
static void trace_table_dependencies(THD *thd,
JOIN_TAB *join_tabs, uint table_count)
@@ -634,7 +660,16 @@ void remove_redundant_subquery_clauses(st_select_lex *subq_select_lex)
{
for (ORDER *ord= subq_select_lex->group_list.first; ord; ord= ord->next)
{
- (*ord->item)->walk(&Item::eliminate_subselect_processor, FALSE, NULL);
+ /*
+        Do not remove the item if it is used in the select list and then
+        referred to from the GROUP BY clause by its name or number. Example:
+
+ select (select ... ) as SUBQ ... group by SUBQ
+
+ Here SUBQ cannot be removed.
+ */
+ if (!ord->in_field_list)
+ (*ord->item)->walk(&Item::eliminate_subselect_processor, FALSE, NULL);
}
subq_select_lex->join->group_list= NULL;
subq_select_lex->group_list.empty();
@@ -1166,22 +1201,6 @@ JOIN::prepare(TABLE_LIST *tables_init,
FALSE, SELECT_ACL, SELECT_ACL, FALSE))
DBUG_RETURN(-1);
- /*
- Permanently remove redundant parts from the query if
- 1) This is a subquery
- 2) This is the first time this query is optimized (since the
- transformation is permanent
- 3) Not normalizing a view. Removal should take place when a
- query involving a view is optimized, not when the view
- is created
- */
- if (select_lex->master_unit()->item && // 1)
- select_lex->first_cond_optimization && // 2)
- !thd->lex->is_view_context_analysis()) // 3)
- {
- remove_redundant_subquery_clauses(select_lex);
- }
-
/* System Versioning: handle FOR SYSTEM_TIME clause. */
if (select_lex->vers_setup_conds(thd, tables_list) < 0)
DBUG_RETURN(-1);
@@ -1264,6 +1283,23 @@ JOIN::prepare(TABLE_LIST *tables_init,
&hidden_group_fields,
&select_lex->select_n_reserved))
DBUG_RETURN(-1);
+
+ /*
+ Permanently remove redundant parts from the query if
+ 1) This is a subquery
+ 2) This is the first time this query is optimized (since the
+ transformation is permanent
+ 3) Not normalizing a view. Removal should take place when a
+ query involving a view is optimized, not when the view
+ is created
+ */
+ if (select_lex->master_unit()->item && // 1)
+ select_lex->first_cond_optimization && // 2)
+ !thd->lex->is_view_context_analysis()) // 3)
+ {
+ remove_redundant_subquery_clauses(select_lex);
+ }
+
/* Resolve the ORDER BY that was skipped, then remove it. */
if (skip_order_by && select_lex !=
select_lex->master_unit()->global_parameters())
@@ -1584,10 +1620,11 @@ bool JOIN::build_explain()
curr_tab->tracker= thd->lex->explain->get_union(select_nr)->
get_tmptable_read_tracker();
}
- else
+ else if (select_nr < INT_MAX)
{
- curr_tab->tracker= thd->lex->explain->get_select(select_nr)->
- get_using_temporary_read_tracker();
+ Explain_select *tmp= thd->lex->explain->get_select(select_nr);
+ if (tmp)
+ curr_tab->tracker= tmp->get_using_temporary_read_tracker();
}
}
DBUG_RETURN(0);
@@ -1807,7 +1844,7 @@ int JOIN::init_join_caches()
int
JOIN::optimize_inner()
{
- DBUG_ENTER("JOIN::optimize");
+ DBUG_ENTER("JOIN::optimize_inner");
subq_exit_fl= false;
do_send_rows = (unit->select_limit_cnt) ? 1 : 0;
@@ -1880,6 +1917,10 @@ JOIN::optimize_inner()
table_count= select_lex->leaf_tables.elements;
+ if (select_lex->options & OPTION_SCHEMA_TABLE &&
+ optimize_schema_tables_memory_usage(select_lex->leaf_tables))
+ DBUG_RETURN(1);
+
if (setup_ftfuncs(select_lex)) /* should be after having->fix_fields */
DBUG_RETURN(-1);
@@ -2093,7 +2134,7 @@ JOIN::optimize_inner()
join->optimization_state == JOIN::OPTIMIZATION_PHASE_1_DONE &&
join->with_two_phase_optimization)
continue;
- /*
+ /*
Do not push conditions from where into materialized inner tables
of outer joins: this is not valid.
*/
@@ -2294,7 +2335,7 @@ setup_subq_exit:
if (with_two_phase_optimization)
optimization_state= JOIN::OPTIMIZATION_PHASE_1_DONE;
else
- {
+ {
if (optimize_stage2())
DBUG_RETURN(1);
}
@@ -2314,7 +2355,7 @@ int JOIN::optimize_stage2()
if (unlikely(thd->check_killed()))
DBUG_RETURN(1);
-
+
/* Generate an execution plan from the found optimal join order. */
if (get_best_combination())
DBUG_RETURN(1);
@@ -3961,7 +4002,7 @@ bool JOIN::setup_subquery_caches()
if (tmp_having)
{
DBUG_ASSERT(having == NULL);
- if (!(tmp_having=
+ if (!(tmp_having=
tmp_having->transform(thd,
&Item::expr_cache_insert_transformer,
NULL)))
@@ -4648,6 +4689,9 @@ mysql_select(THD *thd,
}
else
{
+ if (thd->lex->describe)
+ select_options|= SELECT_DESCRIBE;
+
/*
When in EXPLAIN, delay deleting the joins so that they are still
available when we're producing EXPLAIN EXTENDED warning text.
@@ -4901,6 +4945,7 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
/* The following should be optimized to only clear critical things */
bzero((void*)stat, sizeof(JOIN_TAB)* table_count);
+
/* Initialize POSITION objects */
for (i=0 ; i <= table_count ; i++)
(void) new ((char*) (join->positions + i)) POSITION;
@@ -6889,7 +6934,7 @@ update_ref_and_keys(THD *thd, DYNAMIC_ARRAY *keyuse,JOIN_TAB *join_tab,
Special treatment for ft-keys.
*/
-bool sort_and_filter_keyuse(THD *thd, DYNAMIC_ARRAY *keyuse,
+bool sort_and_filter_keyuse(THD *thd, DYNAMIC_ARRAY *keyuse,
bool skip_unprefixed_keyparts)
{
KEYUSE key_end, *prev, *save_pos, *use;
@@ -8060,7 +8105,7 @@ best_access_path(JOIN *join,
pos->use_join_buffer= best_uses_jbuf;
pos->spl_plan= spl_plan;
pos->range_rowid_filter_info= best_filter;
-
+
loose_scan_opt.save_to_position(s, loose_scan_pos);
if (!best_key &&
@@ -10204,7 +10249,7 @@ bool JOIN::check_two_phase_optimization(THD *thd)
return true;
return false;
}
-
+
bool JOIN::inject_cond_into_where(Item *injected_cond)
{
@@ -10235,7 +10280,7 @@ bool JOIN::inject_cond_into_where(Item *injected_cond)
and_args->push_back(elem, thd->mem_root);
}
}
-
+
return false;
}
@@ -13363,10 +13408,6 @@ void JOIN_TAB::cleanup()
{
DBUG_ENTER("JOIN_TAB::cleanup");
- if (tab_list && tab_list->is_with_table_recursive_reference() &&
- tab_list->with->is_cleaned())
- DBUG_VOID_RETURN;
-
DBUG_PRINT("enter", ("tab: %p table %s.%s",
this,
(table ? table->s->db.str : "?"),
@@ -13539,10 +13580,12 @@ ha_rows JOIN_TAB::get_examined_rows()
bool JOIN_TAB::preread_init()
{
TABLE_LIST *derived= table->pos_in_table_list;
+ DBUG_ENTER("JOIN_TAB::preread_init");
+
if (!derived || !derived->is_materialized_derived())
{
preread_init_done= TRUE;
- return FALSE;
+ DBUG_RETURN(FALSE);
}
/* Materialize derived table/view. */
@@ -13551,7 +13594,7 @@ bool JOIN_TAB::preread_init()
derived->get_unit()->uncacheable) &&
mysql_handle_single_derived(join->thd->lex,
derived, DT_CREATE | DT_FILL))
- return TRUE;
+ DBUG_RETURN(TRUE);
if (!(derived->get_unit()->uncacheable & UNCACHEABLE_DEPENDENT) ||
derived->is_nonrecursive_derived_with_rec_ref())
@@ -13569,9 +13612,9 @@ bool JOIN_TAB::preread_init()
/* init ftfuns for just initialized derived table */
if (table->fulltext_searched)
if (init_ftfuncs(join->thd, join->select_lex, MY_TEST(join->order)))
- return TRUE;
+ DBUG_RETURN(TRUE);
- return FALSE;
+ DBUG_RETURN(FALSE);
}
@@ -14136,6 +14179,7 @@ remove_const(JOIN *join,ORDER *first_order, COND *cond,
{
table_map order_tables=order->item[0]->used_tables();
if (order->item[0]->with_sum_func() ||
+ order->item[0]->with_window_func ||
/*
If the outer table of an outer join is const (either by itself or
after applying WHERE condition), grouping on a field from such a
@@ -14391,22 +14435,71 @@ return_zero_rows(JOIN *join, select_result *result, List<TABLE_LIST> &tables,
DBUG_RETURN(0);
}
-/*
- used only in JOIN::clear
+/**
+ used only in JOIN::clear (always) and in do_select()
+  (if there were no matching rows)
+
+ @param join JOIN
+ @param cleared_tables If not null, clear also const tables and mark all
+ cleared tables in the map. cleared_tables is only
+ set when called from do_select() when there is a
+                         group function and there were no matching rows.
*/
-static void clear_tables(JOIN *join)
+
+static void clear_tables(JOIN *join, table_map *cleared_tables)
{
/*
- must clear only the non-const tables, as const tables
- are not re-calculated.
+ must clear only the non-const tables as const tables are not re-calculated.
*/
for (uint i= 0 ; i < join->table_count ; i++)
{
- if (!(join->table[i]->map & join->const_table_map))
- mark_as_null_row(join->table[i]); // All fields are NULL
+ TABLE *table= join->table[i];
+
+ if (table->null_row)
+ continue; // Nothing more to do
+ if (!(table->map & join->const_table_map) || cleared_tables)
+ {
+ if (cleared_tables)
+ {
+ (*cleared_tables)|= (((table_map) 1) << i);
+ if (table->s->null_bytes)
+ {
+ /*
+ Remember null bits for the record so that we can restore the
+ original const record in unclear_tables()
+ */
+ memcpy(table->record[1], table->null_flags, table->s->null_bytes);
+ }
+ }
+ mark_as_null_row(table); // All fields are NULL
+ }
+ }
+}
+
+
+/**
+ Reverse null marking for tables and restore null bits.
+
+  We have to do this because the tables may be re-used in a subquery
+  and the subquery will assume that the const tables contain the original
+  data from before clear_tables().
+*/
+
+static void unclear_tables(JOIN *join, table_map *cleared_tables)
+{
+ for (uint i= 0 ; i < join->table_count ; i++)
+ {
+ if ((*cleared_tables) & (((table_map) 1) << i))
+ {
+ TABLE *table= join->table[i];
+ if (table->s->null_bytes)
+ memcpy(table->null_flags, table->record[1], table->s->null_bytes);
+ unmark_as_null_row(table);
+ }
}
}
+
/*****************************************************************************
Make som simple condition optimization:
If there is a test 'field = const' change all refs to 'field' to 'const'
@@ -15938,7 +16031,7 @@ static void update_const_equal_items(THD *thd, COND *cond, JOIN_TAB *tab,
Item_func::COND_AND_FUNC));
}
else if (cond->type() == Item::FUNC_ITEM &&
- ((Item_cond*) cond)->functype() == Item_func::MULT_EQUAL_FUNC)
+ ((Item_func*) cond)->functype() == Item_func::MULT_EQUAL_FUNC)
{
Item_equal *item_equal= (Item_equal *) cond;
bool contained_const= item_equal->get_const() != NULL;
@@ -16133,7 +16226,7 @@ propagate_cond_constants(THD *thd, I_List<COND_CMP> *save_list,
(((Item_func*) cond)->functype() == Item_func::EQ_FUNC ||
((Item_func*) cond)->functype() == Item_func::EQUAL_FUNC))
{
- Item_func_eq *func=(Item_func_eq*) cond;
+ Item_bool_func2 *func= dynamic_cast<Item_bool_func2*>(cond);
Item **args= func->arguments();
bool left_const= args[0]->const_item() && !args[0]->is_expensive();
bool right_const= args[1]->const_item() && !args[1]->is_expensive();
@@ -17073,7 +17166,7 @@ void propagate_new_equalities(THD *thd, Item *cond,
}
}
else if (cond->type() == Item::FUNC_ITEM &&
- ((Item_cond*) cond)->functype() == Item_func::MULT_EQUAL_FUNC)
+ ((Item_func*) cond)->functype() == Item_func::MULT_EQUAL_FUNC)
{
Item_equal *equal_item;
List_iterator<Item_equal> it(*new_equalities);
@@ -17318,7 +17411,7 @@ Item_cond::remove_eq_conds(THD *thd, Item::cond_result *cond_value,
}
else if (and_level &&
new_item->type() == Item::FUNC_ITEM &&
- ((Item_cond*) new_item)->functype() ==
+ ((Item_func*) new_item)->functype() ==
Item_func::MULT_EQUAL_FUNC)
{
li.remove();
@@ -17903,17 +17996,34 @@ Field *Item_field::create_tmp_field_ex(TABLE *table,
src->set_field(field);
if (!(result= create_tmp_field_from_item_field(table, NULL, param)))
return NULL;
- /*
- Fields that are used as arguments to the DEFAULT() function already have
- their data pointers set to the default value during name resolution. See
- Item_default_value::fix_fields.
- */
- if (type() != Item::DEFAULT_VALUE_ITEM && field->eq_def(result))
+ if (field->eq_def(result))
src->set_default_field(field);
return result;
}
+Field *Item_default_value::create_tmp_field_ex(TABLE *table,
+ Tmp_field_src *src,
+ const Tmp_field_param *param)
+{
+ if (field->default_value && (field->flags & BLOB_FLAG))
+ {
+ /*
+      We have to use a copy function when using a blob with a default value
+      as we have to calculate the default value before we can use it.
+ */
+ get_tmp_field_src(src, param);
+ return tmp_table_field_from_field_type(table);
+ }
+ /*
+ Same code as in Item_field::create_tmp_field_ex, except no default field
+ handling
+ */
+ src->set_field(field);
+ return create_tmp_field_from_item_field(table, NULL, param);
+}
+
+
Field *Item_ref::create_tmp_field_ex(TABLE *table,
Tmp_field_src *src,
const Tmp_field_param *param)
@@ -18021,7 +18131,13 @@ Field *Item_func_sp::create_tmp_field_ex(TABLE *table,
the record in the original table.
If modify_item is 0 then fill_record() will update
the temporary table
-
+ @param table_cant_handle_bit_fields
+ Set to 1 if the temporary table cannot handle bit
+ fields. Only set for heap tables when the bit field
+ is part of an index.
+ @param make_copy_field
+                           Set when used with rollup, when we want to have
+ an exact copy of the field.
@retval
0 on error
@retval
@@ -19885,6 +20001,7 @@ do_select(JOIN *join, Procedure *procedure)
if (join->only_const_tables() && !join->need_tmp)
{
Next_select_func end_select= setup_end_select_func(join, NULL);
+
/*
HAVING will be checked after processing aggregate functions,
But WHERE should checked here (we alredy have read tables).
@@ -19911,12 +20028,29 @@ do_select(JOIN *join, Procedure *procedure)
}
else if (join->send_row_on_empty_set())
{
+ table_map cleared_tables= (table_map) 0;
+ if (end_select == end_send_group)
+ {
+ /*
+        This was a grouping query but we did not find any rows. In this
+        case we clear all tables to get NULL in all referenced fields,
+        as in:
+ SELECT MAX(a) AS f1, a AS f2 FROM t1 WHERE VALUE(a) IS NOT NULL
+ */
+ clear_tables(join, &cleared_tables);
+ }
if (!join->having || join->having->val_int())
{
List<Item> *columns_list= (procedure ? &join->procedure_fields_list :
join->fields);
rc= join->result->send_data(*columns_list) > 0;
}
+ /*
+      We have to remove the null markings from the tables as these tables
+      may be part of a subquery that is re-evaluated
+ */
+ if (cleared_tables)
+ unclear_tables(join, &cleared_tables);
}
/*
An error can happen when evaluating the conds
@@ -20890,8 +21024,8 @@ join_read_const_table(THD *thd, JOIN_TAB *tab, POSITION *pos)
if ((table->null_row= MY_TEST((*tab->on_expr_ref)->val_int() == 0)))
mark_as_null_row(table);
}
- if (!table->null_row)
- table->maybe_null=0;
+ if (!table->null_row && ! tab->join->mixed_implicit_grouping)
+ table->maybe_null= 0;
{
JOIN *join= tab->join;
@@ -22398,6 +22532,21 @@ make_cond_for_table_from_pred(THD *thd, Item *root_cond, Item *cond,
cond->marker=3; // Checked when read
return (COND*) 0;
}
+ /*
+ If cond is an equality injected for split optimization then
+ a. when retain_ref_cond == false : cond is removed unconditionally
+ (cond that supports ref access is removed by the preceding code)
+ b. when retain_ref_cond == true : cond is removed if it does not
+ support ref access
+ */
+ if (left_item->type() == Item::FIELD_ITEM &&
+ is_eq_cond_injected_for_split_opt((Item_func_eq *) cond) &&
+ (!retain_ref_cond ||
+ !test_if_ref(root_cond, (Item_field*) left_item,right_item)))
+ {
+ cond->marker=3;
+ return (COND*) 0;
+ }
}
cond->marker=2;
cond->set_join_tab_idx(join_tab_idx_arg);
@@ -24036,7 +24185,7 @@ bool
cp_buffer_from_ref(THD *thd, TABLE *table, TABLE_REF *ref)
{
Check_level_instant_set check_level_save(thd, CHECK_FIELD_IGNORE);
- my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set);
+ MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->write_set);
bool result= 0;
for (store_key **copy=ref->key_copy ; *copy ; copy++)
@@ -24047,7 +24196,7 @@ cp_buffer_from_ref(THD *thd, TABLE *table, TABLE_REF *ref)
break;
}
}
- dbug_tmp_restore_column_map(table->write_set, old_map);
+ dbug_tmp_restore_column_map(&table->write_set, old_map);
return result;
}
@@ -25086,8 +25235,8 @@ copy_fields(TMP_TABLE_PARAM *param)
(*ptr->do_copy)(ptr);
List_iterator_fast<Item> it(param->copy_funcs);
- Item_copy_string *item;
- while ((item = (Item_copy_string*) it++))
+ Item_copy *item;
+ while ((item= (Item_copy*) it++))
item->copy();
}
@@ -25718,7 +25867,7 @@ bool JOIN::rollup_init()
{
if (!(rollup.null_items[i]= new (thd->mem_root) Item_null_result(thd)))
return true;
-
+
List<Item> *rollup_fields= &rollup.fields[i];
rollup_fields->empty();
rollup.ref_pointer_arrays[i]= Ref_ptr_array(ref_array, all_fields.elements);
@@ -26051,7 +26200,7 @@ int JOIN::rollup_write_data(uint idx, TMP_TABLE_PARAM *tmp_table_param_arg, TABL
void JOIN::clear()
{
- clear_tables(this);
+ clear_tables(this, 0);
copy_fields(&tmp_table_param);
if (sum_funcs)
@@ -26228,7 +26377,7 @@ bool JOIN_TAB::save_explain_data(Explain_table_access *eta,
{
JOIN_TAB *ctab= bush_children->start;
/* table */
- size_t len= my_snprintf(table_name_buffer,
+ size_t len= my_snprintf(table_name_buffer,
sizeof(table_name_buffer)-1,
"<subquery%d>",
ctab->emb_sj_nest->sj_subq_pred->get_identifier());
@@ -27403,7 +27552,7 @@ void TABLE_LIST::print(THD *thd, table_map eliminated_tables, String *str,
void st_select_lex::print(THD *thd, String *str, enum_query_type query_type)
{
DBUG_ASSERT(thd);
-
+
if (tvc)
{
tvc->print(thd, str, query_type);
@@ -28910,6 +29059,7 @@ select_handler *SELECT_LEX::find_select_handler(THD *thd)
}
+
/**
@} (end of group Query_Optimizer)
*/
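
Among the sql_select.cc changes, clear_tables()/unclear_tables() save each table's NULL-flag bytes into the spare record[1] buffer before marking the table as an all-NULL row and copy them back afterwards, because a subquery may re-read the same const tables and expects their original contents. The save/restore is essentially a byte copy of the null bitmap plus a flag; a tiny standalone sketch (simplified stand-in for TABLE, not MariaDB code):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    struct FakeTable
    {
      uint8_t null_flags[2];   // stands in for TABLE::null_flags
      uint8_t saved[2];        // stands in for the spare record[1] buffer
      bool    null_row;
    };

    static void mark_as_null_row(FakeTable *t)
    {
      std::memcpy(t->saved, t->null_flags, sizeof(t->null_flags)); // remember bits
      std::memset(t->null_flags, 0xFF, sizeof(t->null_flags));     // all columns NULL
      t->null_row= true;
    }

    static void unmark_as_null_row(FakeTable *t)
    {
      std::memcpy(t->null_flags, t->saved, sizeof(t->null_flags)); // restore bits
      t->null_row= false;
    }

    int main()
    {
      FakeTable t= {{0x05, 0x00}, {0, 0}, false};
      mark_as_null_row(&t);
      assert(t.null_row && t.null_flags[0] == 0xFF);
      unmark_as_null_row(&t);
      assert(!t.null_row && t.null_flags[0] == 0x05);   // original bits are back
      return 0;
    }
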
diff --git a/sql/sql_select.h b/sql/sql_select.h
index e14907d73bc..d21d1bcc305 100644
--- a/sql/sql_select.h
+++ b/sql/sql_select.h
@@ -698,8 +698,6 @@ end_write_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
bool end_of_records);
-struct st_position;
-
class Semi_join_strategy_picker
{
public:
@@ -710,7 +708,7 @@ public:
Update internal state after another table has been added to the join
prefix
*/
- virtual void set_from_prev(struct st_position *prev) = 0;
+ virtual void set_from_prev(POSITION *prev) = 0;
virtual bool check_qep(JOIN *join,
uint idx,
@@ -720,7 +718,7 @@ public:
double *read_time,
table_map *handled_fanout,
sj_strategy_enum *strategy,
- struct st_position *loose_scan_pos) = 0;
+ POSITION *loose_scan_pos) = 0;
virtual void mark_used() = 0;
@@ -751,7 +749,7 @@ public:
first_dupsweedout_table= MAX_TABLES;
is_used= FALSE;
}
- void set_from_prev(struct st_position *prev);
+ void set_from_prev(POSITION *prev);
bool check_qep(JOIN *join,
uint idx,
@@ -761,7 +759,7 @@ public:
double *read_time,
table_map *handled_fanout,
sj_strategy_enum *stratey,
- struct st_position *loose_scan_pos);
+ POSITION *loose_scan_pos);
void mark_used() { is_used= TRUE; }
friend void fix_semijoin_strategies_for_picked_join_order(JOIN *join);
@@ -797,7 +795,7 @@ public:
is_used= FALSE;
}
- void set_from_prev(struct st_position *prev);
+ void set_from_prev(POSITION *prev);
bool check_qep(JOIN *join,
uint idx,
table_map remaining_tables,
@@ -806,7 +804,7 @@ public:
double *read_time,
table_map *handled_fanout,
sj_strategy_enum *strategy,
- struct st_position *loose_scan_pos);
+ POSITION *loose_scan_pos);
void mark_used() { is_used= TRUE; }
friend void fix_semijoin_strategies_for_picked_join_order(JOIN *join);
@@ -815,6 +813,7 @@ public:
class LooseScan_picker : public Semi_join_strategy_picker
{
+public:
/* The first (i.e. driving) table we're doing loose scan for */
uint first_loosescan_table;
/*
@@ -833,14 +832,13 @@ class LooseScan_picker : public Semi_join_strategy_picker
uint loosescan_parts; /* Number of keyparts to be kept distinct */
bool is_used;
-public:
void set_empty()
{
first_loosescan_table= MAX_TABLES;
is_used= FALSE;
}
- void set_from_prev(struct st_position *prev);
+ void set_from_prev(POSITION *prev);
bool check_qep(JOIN *join,
uint idx,
table_map remaining_tables,
@@ -849,19 +847,19 @@ public:
double *read_time,
table_map *handled_fanout,
sj_strategy_enum *strategy,
- struct st_position *loose_scan_pos);
+ POSITION *loose_scan_pos);
void mark_used() { is_used= TRUE; }
friend class Loose_scan_opt;
friend void best_access_path(JOIN *join,
JOIN_TAB *s,
table_map remaining_tables,
- const struct st_position *join_positions,
+ const POSITION *join_positions,
uint idx,
bool disable_jbuf,
double record_count,
- struct st_position *pos,
- struct st_position *loose_scan_pos);
+ POSITION *pos,
+ POSITION *loose_scan_pos);
friend bool get_best_combination(JOIN *join);
friend int setup_semijoin_loosescan(JOIN *join);
friend void fix_semijoin_strategies_for_picked_join_order(JOIN *join);
@@ -888,7 +886,7 @@ public:
sjm_scan_last_inner= 0;
is_used= FALSE;
}
- void set_from_prev(struct st_position *prev);
+ void set_from_prev(POSITION *prev);
bool check_qep(JOIN *join,
uint idx,
table_map remaining_tables,
@@ -897,7 +895,7 @@ public:
double *read_time,
table_map *handled_fanout,
sj_strategy_enum *strategy,
- struct st_position *loose_scan_pos);
+ POSITION *loose_scan_pos);
void mark_used() { is_used= TRUE; }
friend void fix_semijoin_strategies_for_picked_join_order(JOIN *join);
@@ -912,8 +910,9 @@ class Rowid_filter;
Information about a position of table within a join order. Used in join
optimization.
*/
-typedef struct st_position
+class POSITION
{
+public:
/* The table that's put into join order */
JOIN_TAB *table;
@@ -925,7 +924,7 @@ typedef struct st_position
double records_read;
/* The selectivity of the pushed down conditions */
- double cond_selectivity;
+ double cond_selectivity;
/*
Cost accessing the table in course of the entire complete join execution,
@@ -934,8 +933,6 @@ typedef struct st_position
*/
double read_time;
- /* Cumulative cost and record count for the join prefix */
- Cost_estimate prefix_cost;
double prefix_record_count;
/*
@@ -944,29 +941,46 @@ typedef struct st_position
*/
KEYUSE *key;
+ /* Info on splitting plan used at this position */
+ SplM_plan_info *spl_plan;
+
+ /* Cost info for the range filter used at this position */
+ Range_rowid_filter_cost_info *range_rowid_filter_info;
+
/* If ref-based access is used: bitmap of tables this table depends on */
table_map ref_depend_map;
-
+
/*
- TRUE <=> join buffering will be used. At the moment this is based on
- *very* imprecise guesses made in best_access_path().
+ Bitmap of semi-join inner tables that are in the join prefix and for
+ which there's no provision for how to eliminate semi-join duplicates
+ they produce.
*/
- bool use_join_buffer;
-
+ table_map dups_producing_tables;
+
+ table_map inner_tables_handled_with_other_sjs;
+
+ Duplicate_weedout_picker dups_weedout_picker;
+ Firstmatch_picker firstmatch_picker;
+ LooseScan_picker loosescan_picker;
+ Sj_materialization_picker sjmat_picker;
+
+ /* Cumulative cost and record count for the join prefix */
+ Cost_estimate prefix_cost;
+
/*
Current optimization state: Semi-join strategy to be used for this
and preceding join tables.
-
+
Join optimizer sets this for the *last* join_tab in the
- duplicate-generating range. That is, in order to interpret this field,
+ duplicate-generating range. That is, in order to interpret this field,
one needs to traverse join->[best_]positions array from right to left.
When you see a join table with sj_strategy!= SJ_OPT_NONE, some other
- field (depending on the strategy) tells how many preceding positions
+ field (depending on the strategy) tells how many preceding positions
this applies to. The values of covered_preceding_positions->sj_strategy
must be ignored.
*/
enum sj_strategy_enum sj_strategy;
-
+
/*
Valid only after fix_semijoin_strategies_for_picked_join_order() call:
if sj_strategy!=SJ_OPT_NONE, this is the number of subsequent tables that
@@ -975,26 +989,12 @@ typedef struct st_position
uint n_sj_tables;
/*
- Bitmap of semi-join inner tables that are in the join prefix and for
- which there's no provision for how to eliminate semi-join duplicates
- they produce.
+ TRUE <=> join buffering will be used. At the moment this is based on
+ *very* imprecise guesses made in best_access_path().
*/
- table_map dups_producing_tables;
-
- table_map inner_tables_handled_with_other_sjs;
-
- Duplicate_weedout_picker dups_weedout_picker;
- Firstmatch_picker firstmatch_picker;
- LooseScan_picker loosescan_picker;
- Sj_materialization_picker sjmat_picker;
-
- /* Info on splitting plan used at this position */
- SplM_plan_info *spl_plan;
-
- /* Cost info for the range filter used at this position */
- Range_rowid_filter_cost_info *range_rowid_filter_info;
-
-} POSITION;
+ bool use_join_buffer;
+ POSITION();
+};
typedef Bounds_checked_array<Item_null_result*> Item_null_array;
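
With st_position rewritten above as the POSITION class (default constructor, picker objects and the prefix Cost_estimate held by value), the patch's debugging code walks the optimizer's position arrays by pointer instead of copying elements, as the sql_test.cc hunks further down show. A small sketch of that access pattern; treating records_read times cond_selectivity as a per-table row estimate is an assumption made only for illustration:

  static double prefix_output_rows(JOIN *join, uint idx)
  {
    double rows= 1.0;
    for (uint i= 0; i < idx; i++)
    {
      const POSITION *pos= join->best_positions + i;   /* no POSITION copy made */
      rows*= pos->records_read * pos->cond_selectivity;
    }
    return rows;
  }
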
@@ -1590,6 +1590,7 @@ public:
fields_list= fields_arg;
non_agg_fields.empty();
bzero((char*) &keyuse,sizeof(keyuse));
+ having_value= Item::COND_UNDEF;
tmp_table_param.init();
tmp_table_param.end_write_records= HA_POS_ERROR;
rollup.state= ROLLUP::STATE_NONE;
@@ -1948,8 +1949,8 @@ class store_key_field: public store_key
enum store_key_result copy_inner()
{
TABLE *table= copy_field.to_field->table;
- my_bitmap_map *old_map= dbug_tmp_use_all_columns(table,
- table->write_set);
+ MY_BITMAP *old_map= dbug_tmp_use_all_columns(table,
+ &table->write_set);
/*
It looks like the next statement is needed only for a simplified
@@ -1960,7 +1961,7 @@ class store_key_field: public store_key
bzero(copy_field.to_ptr,copy_field.to_length);
copy_field.do_copy(&copy_field);
- dbug_tmp_restore_column_map(table->write_set, old_map);
+ dbug_tmp_restore_column_map(&table->write_set, old_map);
null_key= to_field->is_null();
return err != 0 ? STORE_KEY_FATAL : STORE_KEY_OK;
}
@@ -1995,8 +1996,8 @@ public:
enum store_key_result copy_inner()
{
TABLE *table= to_field->table;
- my_bitmap_map *old_map= dbug_tmp_use_all_columns(table,
- table->write_set);
+ MY_BITMAP *old_map= dbug_tmp_use_all_columns(table,
+ &table->write_set);
int res= FALSE;
/*
@@ -2017,7 +2018,7 @@ public:
*/
if (!res && table->in_use->is_error())
res= 1; /* STORE_KEY_FATAL */
- dbug_tmp_restore_column_map(table->write_set, old_map);
+ dbug_tmp_restore_column_map(&table->write_set, old_map);
null_key= to_field->is_null() || item->null_value;
return ((err != 0 || res < 0 || res > 2) ? STORE_KEY_FATAL :
(store_key_result) res);
@@ -2053,8 +2054,8 @@ protected:
{
inited=1;
TABLE *table= to_field->table;
- my_bitmap_map *old_map= dbug_tmp_use_all_columns(table,
- table->write_set);
+ MY_BITMAP *old_map= dbug_tmp_use_all_columns(table,
+ &table->write_set);
if ((res= item->save_in_field(to_field, 1)))
{
if (!err)
@@ -2066,7 +2067,7 @@ protected:
*/
if (!err && to_field->table->in_use->is_error())
err= 1; /* STORE_KEY_FATAL */
- dbug_tmp_restore_column_map(table->write_set, old_map);
+ dbug_tmp_restore_column_map(&table->write_set, old_map);
}
null_key= to_field->is_null() || item->null_value;
return (err > 2 ? STORE_KEY_FATAL : (store_key_result) err);
diff --git a/sql/sql_sequence.cc b/sql/sql_sequence.cc
index a2bcfd5a4ff..83091cd67da 100644
--- a/sql/sql_sequence.cc
+++ b/sql/sql_sequence.cc
@@ -136,7 +136,7 @@ bool sequence_definition::check_and_adjust(bool set_reserved_until)
void sequence_definition::read_fields(TABLE *table)
{
- my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
+ MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->read_set);
reserved_until= table->field[0]->val_int();
min_value= table->field[1]->val_int();
max_value= table->field[2]->val_int();
@@ -145,7 +145,7 @@ void sequence_definition::read_fields(TABLE *table)
cache= table->field[5]->val_int();
cycle= table->field[6]->val_int();
round= table->field[7]->val_int();
- dbug_tmp_restore_column_map(table->read_set, old_map);
+ dbug_tmp_restore_column_map(&table->read_set, old_map);
used_fields= ~(uint) 0;
print_dbug();
}
@@ -157,7 +157,7 @@ void sequence_definition::read_fields(TABLE *table)
void sequence_definition::store_fields(TABLE *table)
{
- my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set);
+ MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->write_set);
/* zero possible delete markers & null bits */
memcpy(table->record[0], table->s->default_values, table->s->null_bytes);
@@ -170,7 +170,7 @@ void sequence_definition::store_fields(TABLE *table)
table->field[6]->store((longlong) cycle != 0, 0);
table->field[7]->store((longlong) round, 1);
- dbug_tmp_restore_column_map(table->write_set, old_map);
+ dbug_tmp_restore_column_map(&table->write_set, old_map);
print_dbug();
}
@@ -527,12 +527,11 @@ int SEQUENCE::read_initial_values(TABLE *table)
int SEQUENCE::read_stored_values(TABLE *table)
{
int error;
- my_bitmap_map *save_read_set;
DBUG_ENTER("SEQUENCE::read_stored_values");
- save_read_set= tmp_use_all_columns(table, table->read_set);
+ MY_BITMAP *save_read_set= tmp_use_all_columns(table, &table->read_set);
error= table->file->ha_read_first_row(table->record[0], MAX_KEY);
- tmp_restore_column_map(table->read_set, save_read_set);
+ tmp_restore_column_map(&table->read_set, save_read_set);
if (unlikely(error))
{
@@ -731,8 +730,8 @@ longlong SEQUENCE::next_value(TABLE *table, bool second_round, int *error)
if (real_increment > 0)
{
- if (reserved_until + add_to > max_value ||
- reserved_until > max_value - add_to)
+ if (reserved_until > max_value - add_to ||
+ reserved_until + add_to > max_value)
{
reserved_until= max_value + 1;
out_of_values= res_value >= reserved_until;
diff --git a/sql/sql_sequence.h b/sql/sql_sequence.h
index 2d609d8591b..29c589e67cd 100644
--- a/sql/sql_sequence.h
+++ b/sql/sql_sequence.h
@@ -111,8 +111,8 @@ public:
{
if (real_increment > 0)
{
- if (value + real_increment > max_value ||
- value > max_value - real_increment)
+ if (value > max_value - real_increment ||
+ value + real_increment > max_value)
value= max_value + 1;
else
value+= real_increment;
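
Both sequence hunks reorder the same boundary test so that the subtraction-based comparison is evaluated first. With short-circuit evaluation the addition is only reached when value <= max_value - increment, in which case it cannot overflow, which is presumably what the UBSAN builds added in this merge would otherwise flag. A standalone sketch of the idea, assuming a positive increment and a non-negative maximum (names are illustrative):

  #include <assert.h>

  /* true when adding a positive increment would step past max_value */
  static bool increment_passes_max(long long value, long long inc, long long max_value)
  {
    assert(inc > 0 && max_value >= 0);
    return value > max_value - inc ||   /* evaluated first: no addition involved */
           value + inc > max_value;     /* reached only when the sum cannot overflow */
  }
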
diff --git a/sql/sql_show.cc b/sql/sql_show.cc
index 1fdd2d7c8d0..6f0c9761b6c 100644
--- a/sql/sql_show.cc
+++ b/sql/sql_show.cc
@@ -2147,7 +2147,6 @@ int show_create_table(THD *thd, TABLE_LIST *table_list, String *packet,
!foreign_db_mode;
bool check_options= !(sql_mode & MODE_IGNORE_BAD_TABLE_OPTIONS) &&
!create_info_arg;
- my_bitmap_map *old_map;
handlerton *hton;
int error= 0;
DBUG_ENTER("show_create_table");
@@ -2214,7 +2213,7 @@ int show_create_table(THD *thd, TABLE_LIST *table_list, String *packet,
We have to restore the read_set if we are called from insert in case
of row based replication.
*/
- old_map= tmp_use_all_columns(table, table->read_set);
+ MY_BITMAP *old_map= tmp_use_all_columns(table, &table->read_set);
bool not_the_first_field= false;
for (ptr=table->field ; (field= *ptr); ptr++)
@@ -2259,8 +2258,13 @@ int show_create_table(THD *thd, TABLE_LIST *table_list, String *packet,
/*
For string types dump collation name only if
collation is not primary for the given charset
+
+ For generated fields don't print the COLLATE clause if
+ the collation matches the expression's collation.
*/
- if (!(field->charset()->state & MY_CS_PRIMARY) && !field->vcol_info)
+ if (!(field->charset()->state & MY_CS_PRIMARY) &&
+ (!field->vcol_info ||
+ field->charset() != field->vcol_info->expr->collation.collation))
{
packet->append(STRING_WITH_LEN(" COLLATE "));
packet->append(field->charset()->name);
@@ -2516,7 +2520,7 @@ int show_create_table(THD *thd, TABLE_LIST *table_list, String *packet,
}
}
#endif
- tmp_restore_column_map(table->read_set, old_map);
+ tmp_restore_column_map(&table->read_set, old_map);
DBUG_RETURN(error);
}
@@ -3252,11 +3256,8 @@ int fill_show_explain(THD *thd, TABLE_LIST *table, COND *cond)
}
DBUG_RETURN(bres);
}
- else
- {
- my_error(ER_NO_SUCH_THREAD, MYF(0), (ulong) thread_id);
- DBUG_RETURN(1);
- }
+ my_error(ER_NO_SUCH_THREAD, MYF(0), (ulong) thread_id);
+ DBUG_RETURN(1);
}
@@ -3832,6 +3833,9 @@ static bool show_status_array(THD *thd, const char *wild,
if (show_type == SHOW_SYS)
mysql_mutex_lock(&LOCK_global_system_variables);
+ else if (show_type >= SHOW_LONG_STATUS && scope == OPT_GLOBAL)
+ calc_sum_of_all_status_if_needed(status_var);
+
pos= get_one_variable(thd, var, scope, show_type, status_var,
&charset, buff, &length);
@@ -3889,7 +3893,6 @@ uint calc_sum_of_all_status(STATUS_VAR *to)
calc_sum_callback_arg arg(to);
DBUG_ENTER("calc_sum_of_all_status");
- *to= global_status_var;
to->local_memory_used= 0;
/* Add to this status from existing threads */
server_threads.iterate(calc_sum_callback, &arg);
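
The hunk above drops the "*to= global_status_var" seeding from calc_sum_of_all_status(), so the function now only adds the per-thread contributions; a caller that wants the full total seeds the accumulator itself, exactly as the sql_test.cc hunk later in this patch does. Sketch of the adjusted call:

  STATUS_VAR tmp;
  tmp= global_status_var;        /* seeding is now the caller's responsibility */
  calc_sum_of_all_status(&tmp);  /* adds what the running threads have accumulated */
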
@@ -5099,7 +5102,8 @@ public:
Sql_condition::enum_warning_level *level,
const char* msg, Sql_condition ** cond_hdl)
{
- if (sql_errno == ER_TRG_NO_DEFINER || sql_errno == ER_TRG_NO_CREATION_CTX)
+ if (sql_errno == ER_TRG_NO_DEFINER || sql_errno == ER_TRG_NO_CREATION_CTX
+ || sql_errno == ER_PARSE_ERROR)
return true;
if (*level != Sql_condition::WARN_LEVEL_ERROR)
@@ -5297,6 +5301,12 @@ int get_all_tables(THD *thd, TABLE_LIST *tables, COND *cond)
continue;
}
+ if (thd->killed == ABORT_QUERY)
+ {
+ error= 0;
+ goto err;
+ }
+
DEBUG_SYNC(thd, "before_open_in_get_all_tables");
if (fill_schema_table_by_open(thd, &tmp_mem_root, FALSE,
table, schema_table,
@@ -5865,7 +5875,7 @@ static bool print_anchor_data_type(const Spvar_definition *def,
Let's print it according to the current sql_mode.
It will make output in line with the value in mysql.proc.param_list,
so both I_S.XXX.DTD_IDENTIFIER and mysql.proc.param_list use the same notation:
- default or Oracle, according to the sql_mode at the SP creation time.
+ default or Oracle, according to the sql_mode at the SP creation time.
The caller must make sure to set thd->variables.sql_mode to the routine sql_mode.
*/
static bool print_anchor_dtd_identifier(THD *thd, const Spvar_definition *def,
@@ -7086,8 +7096,7 @@ static bool store_trigger(THD *thd, Trigger *trigger,
(my_time_t)(trigger->create_time/100));
/* timestamp is with 6 digits */
timestamp.second_part= (trigger->create_time % 100) * 10000;
- ((Field_temporal_with_date*) table->field[16])->store_time_dec(&timestamp,
- 2);
+ table->field[16]->store_time_dec(&timestamp, 2);
}
sql_mode_string_representation(thd, trigger->sql_mode, &sql_mode_rep);
@@ -7969,10 +7978,7 @@ int fill_status(THD *thd, TABLE_LIST *tables, COND *cond)
if (partial_cond)
partial_cond->val_int();
- if (scope == OPT_GLOBAL)
- {
- calc_sum_of_all_status(&tmp);
- }
+ tmp.local_memory_used= 0; // meaning tmp was not populated yet
mysql_rwlock_rdlock(&LOCK_all_status_vars);
res= show_status_array(thd, wild,
@@ -8704,57 +8710,72 @@ end:
}
-static int optimize_schema_tables_memory_usage(TABLE_LIST *table_list)
+bool optimize_schema_tables_memory_usage(List<TABLE_LIST> &tables)
{
- TABLE *table= table_list->table;
- THD *thd=table->in_use;
- if (!table->is_created())
- {
- TMP_TABLE_PARAM *p= table_list->schema_table_param;
- TMP_ENGINE_COLUMNDEF *from_recinfo, *to_recinfo;
- DBUG_ASSERT(table->s->keys == 0);
- DBUG_ASSERT(table->s->uniques == 0);
-
- uchar *cur= table->field[0]->ptr;
- /* first recinfo could be a NULL bitmap, not an actual Field */
- from_recinfo= to_recinfo= p->start_recinfo + (cur != table->record[0]);
- for (uint i=0; i < table->s->fields; i++, from_recinfo++)
- {
- Field *field= table->field[i];
- DBUG_ASSERT(field->vcol_info == 0);
- DBUG_ASSERT(from_recinfo->length);
- DBUG_ASSERT(from_recinfo->length == field->pack_length_in_rec());
- if (bitmap_is_set(table->read_set, i))
+ DBUG_ENTER("optimize_schema_tables_memory_usage");
+
+ List_iterator<TABLE_LIST> tli(tables);
+
+ while (TABLE_LIST *table_list= tli++)
+ {
+ if (!table_list->schema_table)
+ continue;
+
+ TABLE *table= table_list->table;
+ THD *thd=table->in_use;
+
+ if (!thd->fill_information_schema_tables())
+ continue;
+
+ if (!table->is_created())
+ {
+ TMP_TABLE_PARAM *p= table_list->schema_table_param;
+ TMP_ENGINE_COLUMNDEF *from_recinfo, *to_recinfo;
+ DBUG_ASSERT(table->s->keys == 0);
+ DBUG_ASSERT(table->s->uniques == 0);
+
+ uchar *cur= table->field[0]->ptr;
+ /* first recinfo could be a NULL bitmap, not an actual Field */
+ from_recinfo= to_recinfo= p->start_recinfo + (cur != table->record[0]);
+ for (uint i=0; i < table->s->fields; i++, from_recinfo++)
{
- field->move_field(cur);
- *to_recinfo++= *from_recinfo;
- cur+= from_recinfo->length;
+ Field *field= table->field[i];
+ DBUG_ASSERT(field->vcol_info == 0);
+ DBUG_ASSERT(from_recinfo->length);
+ DBUG_ASSERT(from_recinfo->length == field->pack_length_in_rec());
+ if (bitmap_is_set(table->read_set, i))
+ {
+ field->move_field(cur);
+ *to_recinfo++= *from_recinfo;
+ cur+= from_recinfo->length;
+ }
+ else
+ {
+ field= new (thd->mem_root) Field_string(cur, 0, field->null_ptr,
+ field->null_bit, Field::NONE,
+ &field->field_name, field->dtcollation());
+ field->init(table);
+ field->field_index= i;
+ DBUG_ASSERT(field->pack_length_in_rec() == 0);
+ table->field[i]= field;
+ }
}
- else
+ if ((table->s->reclength= (ulong)(cur - table->record[0])) == 0)
{
- field= new (thd->mem_root) Field_string(cur, 0, field->null_ptr,
- field->null_bit, Field::NONE,
- &field->field_name, field->dtcollation());
- field->init(table);
- field->field_index= i;
- DBUG_ASSERT(field->pack_length_in_rec() == 0);
- table->field[i]= field;
+ /* all fields were optimized away. Force a non-0-length row */
+ table->s->reclength= to_recinfo->length= 1;
+ to_recinfo->type= FIELD_NORMAL;
+ to_recinfo++;
}
- }
- if ((table->s->reclength= (ulong)(cur - table->record[0])) == 0)
- {
- /* all fields were optimized away. Force a non-0-length row */
- table->s->reclength= to_recinfo->length= 1;
- to_recinfo++;
- }
- p->recinfo= to_recinfo;
+ p->recinfo= to_recinfo;
- // TODO switch from Aria to Memory if all blobs were optimized away?
- if (instantiate_tmp_table(table, p->keyinfo, p->start_recinfo, &p->recinfo,
- table_list->select_lex->options | thd->variables.option_bits))
- return 1;
+ // TODO switch from Aria to Memory if all blobs were optimized away?
+ if (instantiate_tmp_table(table, p->keyinfo, p->start_recinfo, &p->recinfo,
+ table_list->select_lex->options | thd->variables.option_bits))
+ DBUG_RETURN(1);
+ }
}
- return 0;
+ DBUG_RETURN(0);
}
@@ -8778,9 +8799,6 @@ bool optimize_schema_tables_reads(JOIN *join)
TABLE_LIST *table_list= tab->table->pos_in_table_list;
if (table_list->schema_table && thd->fill_information_schema_tables())
{
- if (optimize_schema_tables_memory_usage(table_list))
- DBUG_RETURN(1);
-
/* A value of 0 indicates a dummy implementation */
if (table_list->schema_table->fill_table == 0)
continue;
@@ -8865,6 +8883,16 @@ bool get_schema_tables_result(JOIN *join,
if (table_list->schema_table->fill_table == 0)
continue;
+ /*
+ Do not fill in tables that are marked as JT_CONST as these will never
+ be read and they also don't have a tab->read_record.table set!
+ This can happen with queries like
+ SELECT * FROM t1 LEFT JOIN (t1 AS t1b JOIN INFORMATION_SCHEMA.ROUTINES)
+ ON (t1b.a IS NULL);
+ */
+ if (tab->type == JT_CONST)
+ continue;
+
/* skip I_S optimizations specific to get_all_tables */
if (lex->describe &&
(table_list->schema_table->fill_table != get_all_tables))
@@ -9861,7 +9889,7 @@ ST_FIELD_INFO check_constraints_fields_info[]=
{"TABLE_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0, OPEN_FULL_TABLE},
{"CONSTRAINT_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0,
OPEN_FULL_TABLE},
- {"CHECK_CLAUSE", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0,
+ {"CHECK_CLAUSE", MAX_FIELD_VARCHARLENGTH , MYSQL_TYPE_STRING, 0, 0, 0,
OPEN_FULL_TABLE},
{0, 0, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE}
};
diff --git a/sql/sql_show.h b/sql/sql_show.h
index 39cbc35230a..c1845d8c1b3 100644
--- a/sql/sql_show.h
+++ b/sql/sql_show.h
@@ -238,6 +238,7 @@ public:
};
bool optimize_schema_tables_reads(JOIN *join);
+bool optimize_schema_tables_memory_usage(List<TABLE_LIST> &tables);
/* Handle the ignored database directories list for SHOW/I_S. */
bool ignore_db_dirs_init();
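
The declaration added here matches the sql_show.cc rewrite earlier in the patch: optimize_schema_tables_memory_usage() now receives the whole table list and itself skips entries that are not I_S tables or whose connection does not fill I_S tables. A sketch of walking a List<TABLE_LIST> the same way the new implementation does (the counting helper is hypothetical):

  static uint count_schema_tables(List<TABLE_LIST> &tables)
  {
    uint n= 0;
    List_iterator<TABLE_LIST> it(tables);
    while (TABLE_LIST *tl= it++)
    {
      if (tl->schema_table)      /* only I_S tables are of interest */
        n++;
    }
    return n;
  }
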
diff --git a/sql/sql_statistics.cc b/sql/sql_statistics.cc
index 717deeabe18..042e86fbd86 100644
--- a/sql/sql_statistics.cc
+++ b/sql/sql_statistics.cc
@@ -1022,15 +1022,14 @@ public:
void store_stat_fields()
{
- char buff[MAX_FIELD_WIDTH];
- String val(buff, sizeof(buff), &my_charset_bin);
- my_bitmap_map *old_map;
+ StringBuffer<MAX_FIELD_WIDTH> val;
- old_map= dbug_tmp_use_all_columns(stat_table, stat_table->read_set);
+ MY_BITMAP *old_map= dbug_tmp_use_all_columns(stat_table, &stat_table->read_set);
for (uint i= COLUMN_STAT_MIN_VALUE; i <= COLUMN_STAT_HISTOGRAM; i++)
{
Field *stat_field= stat_table->field[i];
- if (table_field->collected_stats->is_null(i))
+ Column_statistics *stats= table_field->collected_stats;
+ if (stats->is_null(i))
stat_field->set_null();
else
{
@@ -1038,10 +1037,10 @@ public:
switch (i) {
case COLUMN_STAT_MIN_VALUE:
if (table_field->type() == MYSQL_TYPE_BIT)
- stat_field->store(table_field->collected_stats->min_value->val_int(),true);
+ stat_field->store(stats->min_value->val_int(),true);
else
{
- table_field->collected_stats->min_value->val_str(&val);
+ stats->min_value->val_str(&val);
size_t length= Well_formed_prefix(val.charset(), val.ptr(),
MY_MIN(val.length(), stat_field->field_length)).length();
stat_field->store(val.ptr(), length, &my_charset_bin);
@@ -1049,42 +1048,38 @@ public:
break;
case COLUMN_STAT_MAX_VALUE:
if (table_field->type() == MYSQL_TYPE_BIT)
- stat_field->store(table_field->collected_stats->max_value->val_int(),true);
+ stat_field->store(stats->max_value->val_int(),true);
else
{
- table_field->collected_stats->max_value->val_str(&val);
+ stats->max_value->val_str(&val);
size_t length= Well_formed_prefix(val.charset(), val.ptr(),
MY_MIN(val.length(), stat_field->field_length)).length();
stat_field->store(val.ptr(), length, &my_charset_bin);
}
break;
case COLUMN_STAT_NULLS_RATIO:
- stat_field->store(table_field->collected_stats->get_nulls_ratio());
+ stat_field->store(stats->get_nulls_ratio());
break;
case COLUMN_STAT_AVG_LENGTH:
- stat_field->store(table_field->collected_stats->get_avg_length());
+ stat_field->store(stats->get_avg_length());
break;
case COLUMN_STAT_AVG_FREQUENCY:
- stat_field->store(table_field->collected_stats->get_avg_frequency());
+ stat_field->store(stats->get_avg_frequency());
break;
case COLUMN_STAT_HIST_SIZE:
- stat_field->store(table_field->collected_stats->histogram.get_size());
+ stat_field->store(stats->histogram.get_size());
break;
case COLUMN_STAT_HIST_TYPE:
- stat_field->store(table_field->collected_stats->histogram.get_type() +
- 1);
+ stat_field->store(stats->histogram.get_type() + 1);
break;
case COLUMN_STAT_HISTOGRAM:
- const char * col_histogram=
- (const char *) (table_field->collected_stats->histogram.get_values());
- stat_field->store(col_histogram,
- table_field->collected_stats->histogram.get_size(),
- &my_charset_bin);
+ stat_field->store((char *)stats->histogram.get_values(),
+ stats->histogram.get_size(), &my_charset_bin);
break;
}
}
}
- dbug_tmp_restore_column_map(stat_table->read_set, old_map);
+ dbug_tmp_restore_column_map(&stat_table->read_set, old_map);
}
@@ -1134,16 +1129,30 @@ public:
switch (i) {
case COLUMN_STAT_MIN_VALUE:
- table_field->read_stats->min_value->set_notnull();
- stat_field->val_str(&val);
- table_field->read_stats->min_value->store(val.ptr(), val.length(),
- &my_charset_bin);
+ table_field->read_stats->min_value->set_notnull();
+ if (table_field->type() == MYSQL_TYPE_BIT)
+ table_field->read_stats->min_value->store(stat_field->val_int(),
+ true);
+ else
+ {
+ stat_field->val_str(&val);
+ table_field->read_stats->min_value->store(val.ptr(),
+ val.length(),
+ &my_charset_bin);
+ }
break;
case COLUMN_STAT_MAX_VALUE:
- table_field->read_stats->max_value->set_notnull();
- stat_field->val_str(&val);
- table_field->read_stats->max_value->store(val.ptr(), val.length(),
- &my_charset_bin);
+ table_field->read_stats->max_value->set_notnull();
+ if (table_field->type() == MYSQL_TYPE_BIT)
+ table_field->read_stats->max_value->store(stat_field->val_int(),
+ true);
+ else
+ {
+ stat_field->val_str(&val);
+ table_field->read_stats->max_value->store(val.ptr(),
+ val.length(),
+ &my_charset_bin);
+ }
break;
case COLUMN_STAT_NULLS_RATIO:
table_field->read_stats->set_nulls_ratio(stat_field->val_real());
@@ -2097,20 +2106,24 @@ void create_min_max_statistical_fields_for_table_share(THD *thd,
int alloc_statistics_for_table(THD* thd, TABLE *table)
{
Field **field_ptr;
- uint fields;
DBUG_ENTER("alloc_statistics_for_table");
+ uint columns= 0;
+ for (field_ptr= table->field; *field_ptr; field_ptr++)
+ {
+ if (bitmap_is_set(table->read_set, (*field_ptr)->field_index))
+ columns++;
+ }
Table_statistics *table_stats=
(Table_statistics *) alloc_root(&table->mem_root,
sizeof(Table_statistics));
- fields= table->s->fields ;
Column_statistics_collected *column_stats=
(Column_statistics_collected *) alloc_root(&table->mem_root,
sizeof(Column_statistics_collected) *
- (fields+1));
+ columns);
uint keys= table->s->keys;
Index_statistics *index_stats=
@@ -2121,12 +2134,6 @@ int alloc_statistics_for_table(THD* thd, TABLE *table)
ulonglong *idx_avg_frequency= (ulonglong*) alloc_root(&table->mem_root,
sizeof(ulonglong) * key_parts);
- uint columns= 0;
- for (field_ptr= table->field; *field_ptr; field_ptr++)
- {
- if (bitmap_is_set(table->read_set, (*field_ptr)->field_index))
- columns++;
- }
uint hist_size= thd->variables.histogram_size;
Histogram_type hist_type= (Histogram_type) (thd->variables.histogram_type);
uchar *histogram= NULL;
@@ -2148,19 +2155,17 @@ int alloc_statistics_for_table(THD* thd, TABLE *table)
table_stats->idx_avg_frequency= idx_avg_frequency;
table_stats->histograms= histogram;
- memset(column_stats, 0, sizeof(Column_statistics) * (fields+1));
+ memset(column_stats, 0, sizeof(Column_statistics) * columns);
- for (field_ptr= table->field; *field_ptr; field_ptr++, column_stats++)
+ for (field_ptr= table->field; *field_ptr; field_ptr++)
{
- (*field_ptr)->collected_stats= column_stats;
- (*field_ptr)->collected_stats->max_value= NULL;
- (*field_ptr)->collected_stats->min_value= NULL;
if (bitmap_is_set(table->read_set, (*field_ptr)->field_index))
{
column_stats->histogram.set_size(hist_size);
column_stats->histogram.set_type(hist_type);
column_stats->histogram.set_values(histogram);
histogram+= hist_size;
+ (*field_ptr)->collected_stats= column_stats++;
}
}
@@ -2655,7 +2660,7 @@ int collect_statistics_for_table(THD *thd, TABLE *table)
for (field_ptr= table->field; *field_ptr; field_ptr++)
{
table_field= *field_ptr;
- if (!bitmap_is_set(table->read_set, table_field->field_index))
+ if (!table_field->collected_stats)
continue;
table_field->collected_stats->init(thd, table_field);
}
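
alloc_statistics_for_table() above now allocates Column_statistics_collected slots only for columns present in table->read_set and leaves collected_stats NULL for the rest, which is why this loop and the two below it switched from a bitmap test to a pointer test. A one-line helper stating the resulting convention (hypothetical, for illustration):

  /* After this patch a non-NULL collected_stats pointer means "statistics are
     being collected for this column"; a read_set test is no longer equivalent. */
  static inline bool stats_collected_for(const Field *field)
  {
    return field->collected_stats != NULL;
  }
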
@@ -2680,7 +2685,7 @@ int collect_statistics_for_table(THD *thd, TABLE *table)
for (field_ptr= table->field; *field_ptr; field_ptr++)
{
table_field= *field_ptr;
- if (!bitmap_is_set(table->read_set, table_field->field_index))
+ if (!table_field->collected_stats)
continue;
if ((rc= table_field->collected_stats->add()))
break;
@@ -2710,7 +2715,7 @@ int collect_statistics_for_table(THD *thd, TABLE *table)
for (field_ptr= table->field; *field_ptr; field_ptr++)
{
table_field= *field_ptr;
- if (!bitmap_is_set(table->read_set, table_field->field_index))
+ if (!table_field->collected_stats)
continue;
bitmap_set_bit(table->write_set, table_field->field_index);
if (!rc)
@@ -2814,7 +2819,7 @@ int update_statistics_for_table(THD *thd, TABLE *table)
for (Field **field_ptr= table->field; *field_ptr; field_ptr++)
{
Field *table_field= *field_ptr;
- if (!bitmap_is_set(table->read_set, table_field->field_index))
+ if (!table_field->collected_stats)
continue;
restore_record(stat_table, s->default_values);
column_stat.set_key_fields(table_field);
@@ -3741,6 +3746,7 @@ double get_column_range_cardinality(Field *field,
if (!table->stats_is_read)
return tab_records;
+ THD *thd= table->in_use;
double col_nulls= tab_records * col_stats->get_nulls_ratio();
double col_non_nulls= tab_records - col_nulls;
@@ -3771,7 +3777,7 @@ double get_column_range_cardinality(Field *field,
col_stats->min_max_values_are_provided())
{
Histogram *hist= &col_stats->histogram;
- if (hist->is_available())
+ if (hist->is_usable(thd))
{
store_key_image_to_rec(field, (uchar *) min_endp->key,
field->key_length());
@@ -3815,10 +3821,10 @@ double get_column_range_cardinality(Field *field,
max_mp_pos= 1.0;
Histogram *hist= &col_stats->histogram;
- if (!hist->is_available())
- sel= (max_mp_pos - min_mp_pos);
- else
+ if (hist->is_usable(thd))
sel= hist->range_selectivity(min_mp_pos, max_mp_pos);
+ else
+ sel= (max_mp_pos - min_mp_pos);
res= col_non_nulls * sel;
set_if_bigger(res, col_stats->get_avg_frequency());
}
diff --git a/sql/sql_statistics.h b/sql/sql_statistics.h
index 20ecf06bfee..35b3aa33acc 100644
--- a/sql/sql_statistics.h
+++ b/sql/sql_statistics.h
@@ -239,6 +239,17 @@ public:
bool is_available() { return get_size() > 0 && get_values(); }
+ /*
+ A histogram is considered usable only when
+ 1) the level of optimizer_use_condition_selectivity > 3
+ 2) histograms have been collected
+ */
+ bool is_usable(THD *thd)
+ {
+ return thd->variables.optimizer_use_condition_selectivity > 3 &&
+ is_available();
+ }
+
void set_value(uint i, double val)
{
switch (type) {
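
The new Histogram::is_usable() bundles the optimizer_use_condition_selectivity > 3 check with is_available(), which is what lets the sql_statistics.cc call sites above collapse their two-step tests. Usage sketch mirroring those call sites (the wrapper function is illustrative):

  static double range_sel(THD *thd, Histogram *hist, double min_pos, double max_pos)
  {
    if (hist->is_usable(thd))                        /* enabled and collected */
      return hist->range_selectivity(min_pos, max_pos);
    return max_pos - min_pos;                        /* uniform fallback */
  }
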
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index 6a52b7d418e..0249ae68cc3 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -1,6 +1,6 @@
/*
Copyright (c) 2000, 2019, Oracle and/or its affiliates.
- Copyright (c) 2010, 2020, MariaDB
+ Copyright (c) 2010, 2021, MariaDB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -74,11 +74,12 @@ static int copy_data_between_tables(THD *, TABLE *,TABLE *,
Alter_info::enum_enable_or_disable,
Alter_table_ctx *);
static int mysql_prepare_create_table(THD *, HA_CREATE_INFO *, Alter_info *,
- uint *, handler *, KEY **, uint *, int);
+ uint *, handler *, KEY **, uint *, int,
+ const LEX_CSTRING db,
+ const LEX_CSTRING table_name);
static uint blob_length_by_type(enum_field_types type);
-static bool fix_constraints_names(THD *thd, List<Virtual_column_info>
- *check_constraint_list,
- const HA_CREATE_INFO *create_info);
+static bool fix_constraints_names(THD *, List<Virtual_column_info> *,
+ const HA_CREATE_INFO *);
/**
@brief Helper function for explain_filename
@@ -1825,7 +1826,7 @@ bool mysql_write_frm(ALTER_PARTITION_PARAM_TYPE *lpt, uint flags)
if (mysql_prepare_create_table(lpt->thd, lpt->create_info, lpt->alter_info,
&lpt->db_options, lpt->table->file,
&lpt->key_info_buffer, &lpt->key_count,
- C_ALTER_TABLE))
+ C_ALTER_TABLE, lpt->db, lpt->table_name))
{
DBUG_RETURN(TRUE);
}
@@ -2304,8 +2305,11 @@ int mysql_rm_table_no_locks(THD *thd, TABLE_LIST *tables, bool if_exists,
{
bool is_trans= 0;
bool table_creation_was_logged= 0;
+ bool real_table= FALSE;
LEX_CSTRING db= table->db;
handlerton *table_type= 0;
+ // reset error state for this table
+ error= 0;
DBUG_PRINT("table", ("table_l: '%s'.'%s' table: %p s: %p",
table->db.str, table->table_name.str, table->table,
@@ -2321,9 +2325,35 @@ int mysql_rm_table_no_locks(THD *thd, TABLE_LIST *tables, bool if_exists,
thd->find_temporary_table(table) &&
table->mdl_request.ticket != NULL));
- if (table->open_type == OT_BASE_ONLY || !is_temporary_table(table) ||
- (drop_sequence && table->table->s->table_type != TABLE_TYPE_SEQUENCE))
+ if (table->open_type == OT_BASE_ONLY || !is_temporary_table(table))
+ real_table= TRUE;
+ else if (drop_sequence &&
+ table->table->s->table_type != TABLE_TYPE_SEQUENCE)
+ {
+ was_table= (table->table->s->table_type == TABLE_TYPE_NORMAL);
+ was_view= (table->table->s->table_type == TABLE_TYPE_VIEW);
+ if (if_exists)
+ {
+ char buff[FN_REFLEN];
+ String tbl_name(buff, sizeof(buff), system_charset_info);
+ tbl_name.length(0);
+ tbl_name.append(&db);
+ tbl_name.append('.');
+ tbl_name.append(&table->table_name);
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
+ ER_NOT_SEQUENCE2, ER_THD(thd, ER_NOT_SEQUENCE2),
+ tbl_name.c_ptr_safe());
+
+ /*
+ Our job is done here. This statement was added to avoid executing
+ unnecessary code farther below which in some strange corner cases
+ caused the server to crash (see MDEV-17896).
+ */
+ goto log_query;
+ }
error= 1;
+ goto non_critical_err;
+ }
else
{
table_creation_was_logged= table->table->s->table_creation_was_logged;
@@ -2332,29 +2362,28 @@ int mysql_rm_table_no_locks(THD *thd, TABLE_LIST *tables, bool if_exists,
error= 1;
goto err;
}
- error= 0;
table->table= 0;
}
- if ((drop_temporary && if_exists) || !error)
+ if ((drop_temporary && if_exists) || !real_table)
{
/*
This handles the case of temporary tables. We have the following cases:
. "DROP TEMPORARY" was executed and a temporary table was affected
- (i.e. drop_temporary && !error) or the if_exists was specified (i.e.
- drop_temporary && if_exists).
+ (i.e. drop_temporary && !real_table) or the
+ if_exists was specified (i.e. drop_temporary && if_exists).
. "DROP" was executed but a temporary table was affected (.i.e
- !error).
+ !real_table).
*/
if (!dont_log_query && table_creation_was_logged)
{
/*
- If there is an error, we don't know the type of the engine
+ If this is a real table, we don't know the type of the engine
at this point. So, we keep it in the trx-cache.
*/
- is_trans= error ? TRUE : is_trans;
+ is_trans= real_table ? TRUE : is_trans;
if (is_trans)
trans_tmp_table_deleted= TRUE;
else
@@ -2381,7 +2410,7 @@ int mysql_rm_table_no_locks(THD *thd, TABLE_LIST *tables, bool if_exists,
is no need to proceed with the code that tries to drop a regular
table.
*/
- if (!error) continue;
+ if (!real_table) continue;
}
else if (!drop_temporary)
{
@@ -2397,7 +2426,6 @@ int mysql_rm_table_no_locks(THD *thd, TABLE_LIST *tables, bool if_exists,
reg_ext, 0);
}
DEBUG_SYNC(thd, "rm_table_no_locks_before_delete_table");
- error= 0;
if (drop_temporary ||
(ha_table_exists(thd, &db, &alias, &table_type, &is_sequence) == 0 &&
table_type == 0) ||
@@ -2437,6 +2465,11 @@ int mysql_rm_table_no_locks(THD *thd, TABLE_LIST *tables, bool if_exists,
{
non_tmp_error = (drop_temporary ? non_tmp_error : TRUE);
error= 1;
+ /*
+ Non-critical error (only for this table), so we continue.
+ Next we write it to wrong_tables and continue this loop.
+ The same as "goto non_critical_err".
+ */
}
}
else
@@ -2530,7 +2563,7 @@ int mysql_rm_table_no_locks(THD *thd, TABLE_LIST *tables, bool if_exists,
}
non_tmp_error|= MY_TEST(error);
}
-
+non_critical_err:
if (error)
{
if (wrong_tables.length())
@@ -2895,6 +2928,15 @@ bool check_duplicates_in_interval(const char *set_or_name,
}
+bool Column_definition::
+ prepare_charset_for_string(const Column_derived_attributes *dattr)
+{
+ if (!charset)
+ charset= dattr->charset();
+ return (flags & BINCMP_FLAG) && !(charset= find_bin_collation(charset));
+}
+
+
bool Column_definition::prepare_stage2_blob(handler *file,
ulonglong table_flags,
uint field_flags)
@@ -2976,38 +3018,6 @@ bool Column_definition::prepare_stage2(handler *file,
}
-/*
- Get character set from field object generated by parser using
- default values when not set.
-
- SYNOPSIS
- get_sql_field_charset()
- sql_field The sql_field object
- create_info Info generated by parser
-
- RETURN VALUES
- cs Character set
-*/
-
-CHARSET_INFO* get_sql_field_charset(Column_definition *sql_field,
- HA_CREATE_INFO *create_info)
-{
- CHARSET_INFO *cs= sql_field->charset;
-
- if (!cs)
- cs= create_info->default_table_charset;
- /*
- table_charset is set only in ALTER TABLE t1 CONVERT TO CHARACTER SET csname
- if we want change character set for all varchar/char columns.
- But the table charset must not affect the BLOB fields, so don't
- allow to change my_charset_bin to somethig else.
- */
- if (create_info->table_charset && cs != &my_charset_bin)
- cs= create_info->table_charset;
- return cs;
-}
-
-
/**
Modifies the first column definition whose SQL type is TIMESTAMP
by adding the features DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP.
@@ -3180,11 +3190,14 @@ bool Column_definition::prepare_stage1_bit(THD *thd,
bool Column_definition::prepare_stage1(THD *thd,
MEM_ROOT *mem_root,
handler *file,
- ulonglong table_flags)
+ ulonglong table_flags,
+ const Column_derived_attributes
+ *derived_attr)
{
return type_handler()->Column_definition_prepare_stage1(thd, mem_root,
this, file,
- table_flags);
+ table_flags,
+ derived_attr);
}
@@ -3381,7 +3394,8 @@ static int
mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
Alter_info *alter_info, uint *db_options,
handler *file, KEY **key_info_buffer,
- uint *key_count, int create_table_mode)
+ uint *key_count, int create_table_mode,
+ const LEX_CSTRING db, const LEX_CSTRING table_name)
{
const char *key_name;
Create_field *sql_field,*dup_field;
@@ -3396,7 +3410,11 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
uint total_uneven_bit_length= 0;
int select_field_count= C_CREATE_SELECT(create_table_mode);
bool tmp_table= create_table_mode == C_ALTER_TABLE;
+ const bool create_simple= thd->lex->create_simple();
bool is_hash_field_needed= false;
+ const Column_derived_attributes dattr(create_info->default_table_charset);
+ const Column_bulk_alter_attributes
+ battr(create_info->alter_table_convert_to_charset);
DBUG_ENTER("mysql_prepare_create_table");
DBUG_EXECUTE_IF("test_pseudo_invisible",{
@@ -3453,26 +3471,27 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
for (field_no=0; (sql_field=it++) ; field_no++)
{
+ /* Virtual fields are always NULL */
+ if (sql_field->vcol_info)
+ sql_field->flags&= ~NOT_NULL_FLAG;
+
/*
Initialize length from its original value (number of characters),
which was set in the parser. This is necessary if we're
executing a prepared statement for the second time.
*/
sql_field->length= sql_field->char_length;
- /* Set field charset. */
- sql_field->charset= get_sql_field_charset(sql_field, create_info);
- if ((sql_field->flags & BINCMP_FLAG) &&
- !(sql_field->charset= find_bin_collation(sql_field->charset)))
- DBUG_RETURN(true);
- /* Virtual fields are always NULL */
- if (sql_field->vcol_info)
- sql_field->flags&= ~NOT_NULL_FLAG;
+ if (sql_field->bulk_alter(&dattr, &battr))
+ DBUG_RETURN(true);
if (sql_field->prepare_stage1(thd, thd->mem_root,
- file, file->ha_table_flags()))
+ file, file->ha_table_flags(),
+ &dattr))
DBUG_RETURN(true);
+ DBUG_ASSERT(sql_field->charset);
+
if (sql_field->real_field_type() == MYSQL_TYPE_BIT &&
file->ha_table_flags() & HA_CAN_BIT_FIELD)
total_uneven_bit_length+= sql_field->length & 7;
@@ -3523,7 +3542,7 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
if (!(sql_field->flags & NOT_NULL_FLAG))
null_fields--;
- if (sql_field->redefine_stage1(dup_field, file, create_info))
+ if (sql_field->redefine_stage1(dup_field, file))
DBUG_RETURN(true);
it2.remove(); // Remove first (create) definition
@@ -4040,8 +4059,7 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
key_part_length= MY_MIN(max_key_length, file->max_key_part_length());
/* not a critical problem */
push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
- ER_TOO_LONG_KEY,
- ER_THD(thd, ER_TOO_LONG_KEY),
+ ER_TOO_LONG_KEY, ER_THD(thd, ER_TOO_LONG_KEY),
key_part_length);
/* Align key length to multibyte char boundary */
key_part_length-= key_part_length % sql_field->charset->mbmaxlen;
@@ -4085,7 +4103,7 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
{
key_part_length= file->max_key_part_length();
/* not a critical problem */
- push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
ER_TOO_LONG_KEY, ER_THD(thd, ER_TOO_LONG_KEY),
key_part_length);
/* Align key length to multibyte char boundary */
@@ -4242,6 +4260,7 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
create_info->null_bits= null_fields;
/* Check fields. */
+ Item::Check_table_name_prm walk_prm(db, table_name);
it.rewind();
while ((sql_field=it++))
{
@@ -4296,6 +4315,37 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
sql_field->field_name.str);
DBUG_RETURN(TRUE);
}
+
+ if (create_simple)
+ {
+ /*
+ NOTE: we cannot do this in check_vcol_func_processor() because by then
+ the table name qualifier is already gone from the expression.
+ */
+ if (sql_field->vcol_info && sql_field->vcol_info->expr &&
+ sql_field->vcol_info->expr->walk(&Item::check_table_name_processor,
+ false, (void *) &walk_prm))
+ {
+ my_error(ER_BAD_FIELD_ERROR, MYF(0), walk_prm.field.c_ptr(), "GENERATED ALWAYS");
+ DBUG_RETURN(TRUE);
+ }
+
+ if (sql_field->default_value &&
+ sql_field->default_value->expr->walk(&Item::check_table_name_processor,
+ false, (void *) &walk_prm))
+ {
+ my_error(ER_BAD_FIELD_ERROR, MYF(0), walk_prm.field.c_ptr(), "DEFAULT");
+ DBUG_RETURN(TRUE);
+ }
+
+ if (sql_field->check_constraint &&
+ sql_field->check_constraint->expr->walk(&Item::check_table_name_processor,
+ false, (void *) &walk_prm))
+ {
+ my_error(ER_BAD_FIELD_ERROR, MYF(0), walk_prm.field.c_ptr(), "CHECK");
+ DBUG_RETURN(TRUE);
+ }
+ }
}
/* Check table level constraints */
@@ -4305,6 +4355,12 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
Virtual_column_info *check;
while ((check= c_it++))
{
+ if (create_simple && check->expr->walk(&Item::check_table_name_processor, false,
+ (void *) &walk_prm))
+ {
+ my_error(ER_BAD_FIELD_ERROR, MYF(0), walk_prm.field.c_ptr(), "CHECK");
+ DBUG_RETURN(TRUE);
+ }
if (!check->name.length || check->automatic_name)
continue;
@@ -4315,8 +4371,6 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
const Virtual_column_info *dup_check;
while ((dup_check= dup_it++) && dup_check != check)
{
- if (!dup_check->name.length || dup_check->automatic_name)
- continue;
if (!lex_string_cmp(system_charset_info,
&check->name, &dup_check->name))
{
@@ -4366,7 +4420,8 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
ER_ILLEGAL_HA_CREATE_OPTION,
ER_THD(thd, ER_ILLEGAL_HA_CREATE_OPTION),
file->engine_name()->str,
- "TRANSACTIONAL=1");
+ create_info->transactional == HA_CHOICE_YES
+ ? "TRANSACTIONAL=1" : "TRANSACTIONAL=0");
if (parse_option_list(thd, file->partition_ht(), &create_info->option_struct,
&create_info->option_list,
@@ -4540,7 +4595,9 @@ bool Column_definition::prepare_blob_field(THD *thd)
bool Column_definition::sp_prepare_create_field(THD *thd, MEM_ROOT *mem_root)
{
- return prepare_stage1(thd, mem_root, NULL, HA_CAN_GEOMETRY) ||
+ DBUG_ASSERT(charset);
+ const Column_derived_attributes dattr(&my_charset_bin);
+ return prepare_stage1(thd, mem_root, NULL, HA_CAN_GEOMETRY, &dattr) ||
prepare_stage2(NULL, HA_CAN_GEOMETRY);
}
@@ -4829,7 +4886,8 @@ handler *mysql_create_frm_image(THD *thd, const LEX_CSTRING &db,
}
if (mysql_prepare_create_table(thd, create_info, alter_info, &db_options,
- file, key_info, key_count, create_table_mode))
+ file, key_info, key_count,
+ create_table_mode, db, table_name))
goto err;
create_info->table_options=db_options;
@@ -5267,6 +5325,9 @@ bool mysql_create_table(THD *thd, TABLE_LIST *create_table,
if (!opt_explicit_defaults_for_timestamp)
promote_first_timestamp_column(&alter_info->create_list);
+ /* We can abort create table for any table type */
+ thd->abort_on_warning= thd->is_strict_mode();
+
if (mysql_create_table_no_lock(thd, &create_table->db,
&create_table->table_name, create_info,
alter_info,
@@ -5304,6 +5365,8 @@ bool mysql_create_table(THD *thd, TABLE_LIST *create_table,
}
err:
+ thd->abort_on_warning= 0;
+
/* In RBR or readonly server we don't need to log CREATE TEMPORARY TABLE */
if (!result && create_info->tmp_table() &&
(thd->is_current_stmt_binlog_format_row() || (opt_readonly && !thd->slave_thread)))
@@ -7300,13 +7363,15 @@ bool mysql_compare_tables(TABLE *table,
Alter_info tmp_alter_info(*alter_info, thd->mem_root);
uint db_options= 0; /* not used */
KEY *key_info_buffer= NULL;
+ LEX_CSTRING db= { table->s->db.str, table->s->db.length };
+ LEX_CSTRING table_name= { table->s->table_name.str, table->s->table_name.length };
/* Create the prepared information. */
int create_table_mode= table->s->tmp_table == NO_TMP_TABLE ?
C_ORDINARY_CREATE : C_ALTER_TABLE;
if (mysql_prepare_create_table(thd, create_info, &tmp_alter_info,
&db_options, table->file, &key_info_buffer,
- &key_count, create_table_mode))
+ &key_count, create_table_mode, db, table_name))
DBUG_RETURN(1);
/* Some very basic checks. */
@@ -8298,7 +8363,7 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
def->real_field_type() == MYSQL_TYPE_NEWDATE ||
def->real_field_type() == MYSQL_TYPE_DATETIME ||
def->real_field_type() == MYSQL_TYPE_DATETIME2) &&
- !alter_ctx->datetime_field &&
+ !alter_ctx->datetime_field && !def->field &&
!(~def->flags & (NO_DEFAULT_VALUE_FLAG | NOT_NULL_FLAG)) &&
thd->variables.sql_mode & MODE_NO_ZERO_DATE)
{
@@ -8431,6 +8496,7 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
long_hash_key= true;
}
const char *dropped_key_part= NULL;
+ bool user_keyparts= false; // some user-defined keyparts left
KEY_PART_INFO *key_part= key_info->key_part;
key_parts.empty();
bool delete_index_stat= FALSE;
@@ -8506,6 +8572,8 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
key_parts.push_back(new (thd->mem_root) Key_part_spec(&cfield->field_name,
key_part_length, true),
thd->mem_root);
+ if (!(cfield->invisible == INVISIBLE_SYSTEM && cfield->vers_sys_field()))
+ user_keyparts= true;
}
if (table->s->tmp_table == NO_TMP_TABLE)
{
@@ -8516,6 +8584,14 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
(void) delete_statistics_for_index(thd, table, key_info, TRUE);
}
+ if (!user_keyparts && key_parts.elements)
+ {
+ /*
+ If we dropped all user key-parts, also drop the implicit system fields.
+ */
+ key_parts.empty();
+ }
+
if (key_parts.elements)
{
KEY_CREATE_INFO key_create_info;
@@ -8650,37 +8726,28 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
}
}
- // NB: `check` is TABLE resident, we must keep it intact.
- if (keep)
- {
- check= check->clone(thd);
- if (!check)
- {
- my_error(ER_OUT_OF_RESOURCES, MYF(0));
- goto err;
- }
- }
-
if (share->period.constr_name.streq(check->name.str))
{
- if (drop_period)
- {
- keep= false;
- }
- else if(!keep)
+ if (!drop_period && !keep)
{
my_error(ER_PERIOD_CONSTRAINT_DROP, MYF(0), check->name.str,
share->period.name.str);
goto err;
}
- else
+ keep= keep && !drop_period;
+
+ DBUG_ASSERT(create_info->period_info.constr == NULL || drop_period);
+
+ if (keep)
{
- DBUG_ASSERT(create_info->period_info.constr == NULL);
+ Item *expr_copy= check->expr->get_copy(thd);
+ check= new Virtual_column_info();
+ check->name= share->period.constr_name;
+ check->automatic_name= true;
+ check->expr= expr_copy;
create_info->period_info.constr= check;
- create_info->period_info.constr->automatic_name= true;
}
}
-
/* see if the constraint depends on *only* on dropped fields */
if (keep && dropped_fields)
{
@@ -9563,7 +9630,8 @@ bool mysql_alter_table(THD *thd, const LEX_CSTRING *new_db,
If such table exists, there must be a corresponding TABLE_SHARE in
THD::all_temp_tables list.
*/
- if (thd->find_tmp_table_share(alter_ctx.new_db.str, alter_ctx.new_name.str))
+ if (thd->find_tmp_table_share(alter_ctx.new_db.str,
+ alter_ctx.new_name.str))
{
my_error(ER_TABLE_EXISTS_ERROR, MYF(0), alter_ctx.new_alias.str);
DBUG_RETURN(true);
@@ -9704,6 +9772,17 @@ bool mysql_alter_table(THD *thd, const LEX_CSTRING *new_db,
if (table->s->tmp_table == NO_TMP_TABLE)
mysql_audit_alter_table(thd, table_list);
+ else if (table->s->table_creation_was_logged && mysql_bin_log.is_open())
+ {
+ /* Protect against MDL error in binary logging */
+ MDL_request mdl_request;
+ DBUG_ASSERT(!mdl_ticket);
+ mdl_request.init(MDL_key::BACKUP, "", "", MDL_BACKUP_COMMIT,
+ MDL_TRANSACTION);
+ if (thd->mdl_context.acquire_lock(&mdl_request,
+ thd->variables.lock_wait_timeout))
+ DBUG_RETURN(true);
+ }
THD_STAGE_INFO(thd, stage_setup);
@@ -9783,9 +9862,12 @@ do_continue:;
thd->get_stmt_da()->current_statement_warn_count());
my_ok(thd, 0L, 0L, alter_ctx.tmp_buff);
- /* We don't replicate alter table statement on temporary tables */
+ /*
+ We don't replicate the ALTER TABLE statement on temporary tables
+ for which we did not log the CREATE TEMPORARY TABLE statement.
+ */
if (table->s->tmp_table == NO_TMP_TABLE ||
- !thd->is_current_stmt_binlog_format_row())
+ table->s->table_creation_was_logged)
{
if (write_bin_log(thd, true, thd->query(), thd->query_length()))
DBUG_RETURN(true);
@@ -10031,6 +10113,7 @@ do_continue:;
tmp_disable_binlog(thd);
create_info->options|=HA_CREATE_TMP_ALTER;
+ create_info->alias= alter_ctx.table_name;
error= create_table_impl(thd, alter_ctx.db, alter_ctx.table_name,
alter_ctx.new_db, alter_ctx.tmp_name,
alter_ctx.get_tmp_path(),
@@ -11305,8 +11388,8 @@ bool Sql_cmd_create_table_like::execute(THD *thd)
{
create_info.used_fields&= ~HA_CREATE_USED_CHARSET;
create_info.used_fields|= HA_CREATE_USED_DEFAULT_CHARSET;
- create_info.default_table_charset= create_info.table_charset;
- create_info.table_charset= 0;
+ create_info.default_table_charset= create_info.alter_table_convert_to_charset;
+ create_info.alter_table_convert_to_charset= 0;
}
/*
diff --git a/sql/sql_table.h b/sql/sql_table.h
index 35bff0873ea..62b61684286 100644
--- a/sql/sql_table.h
+++ b/sql/sql_table.h
@@ -252,8 +252,6 @@ bool quick_rm_table(THD *thd, handlerton *base, const LEX_CSTRING *db,
const char *table_path=0);
void close_cached_table(THD *thd, TABLE *table);
void sp_prepare_create_field(THD *thd, Column_definition *sql_field);
-CHARSET_INFO* get_sql_field_charset(Column_definition *sql_field,
- HA_CREATE_INFO *create_info);
bool mysql_write_frm(ALTER_PARTITION_PARAM_TYPE *lpt, uint flags);
int write_bin_log(THD *thd, bool clear_error,
char const *query, ulong query_length,
diff --git a/sql/sql_test.cc b/sql/sql_test.cc
index 08dc137bebe..5ea132c83d4 100644
--- a/sql/sql_test.cc
+++ b/sql/sql_test.cc
@@ -294,7 +294,6 @@ print_plan(JOIN* join, uint idx, double record_count, double read_time,
double current_read_time, const char *info)
{
uint i;
- POSITION pos;
JOIN_TAB *join_table;
JOIN_TAB **plan_nodes;
TABLE* table;
@@ -321,8 +320,8 @@ print_plan(JOIN* join, uint idx, double record_count, double read_time,
fputs(" POSITIONS: ", DBUG_FILE);
for (i= 0; i < idx ; i++)
{
- pos = join->positions[i];
- table= pos.table->table;
+ POSITION *pos= join->positions + i;
+ table= pos->table->table;
if (table)
fputs(table->s->table_name.str, DBUG_FILE);
fputc(' ', DBUG_FILE);
@@ -338,8 +337,8 @@ print_plan(JOIN* join, uint idx, double record_count, double read_time,
fputs("BEST_POSITIONS: ", DBUG_FILE);
for (i= 0; i < idx ; i++)
{
- pos= join->best_positions[i];
- table= pos.table->table;
+ POSITION *pos= join->best_positions + i;
+ table= pos->table->table;
if (table)
fputs(table->s->table_name.str, DBUG_FILE);
fputc(' ', DBUG_FILE);
@@ -565,6 +564,7 @@ void mysql_print_status()
STATUS_VAR tmp;
uint count;
+ tmp= global_status_var;
count= calc_sum_of_all_status(&tmp);
printf("\nStatus information:\n\n");
(void) my_getwd(current_dir, sizeof(current_dir),MYF(0));
@@ -616,8 +616,12 @@ Next alarm time: %lu\n",
(ulong)alarm_info.next_alarm_time);
#endif
display_table_locks();
-#ifdef HAVE_MALLINFO
+#if defined(HAVE_MALLINFO2)
+ struct mallinfo2 info = mallinfo2();
+#elif defined(HAVE_MALLINFO)
struct mallinfo info= mallinfo();
+#endif
+#if defined(HAVE_MALLINFO) || defined(HAVE_MALLINFO2)
char llbuff[10][22];
printf("\nMemory status:\n\
Non-mmapped space allocated from system: %s\n\
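
The mysql_print_status() hunk prefers mallinfo2() when the build detects it (glibc 2.33 deprecates mallinfo() because its int-sized fields overflow with large allocations) and keeps mallinfo() as the fallback; both structs use the same field names, so the printing code can stay shared. A trimmed sketch of the same guard, assuming <malloc.h> and <stdio.h> are included and the HAVE_* macros come from the build system as in the hunk:

  #if defined(HAVE_MALLINFO2)
    struct mallinfo2 info= mallinfo2();   /* size_t fields, overflow-safe */
  #elif defined(HAVE_MALLINFO)
    struct mallinfo info= mallinfo();     /* legacy int fields */
  #endif
  #if defined(HAVE_MALLINFO) || defined(HAVE_MALLINFO2)
    printf("Non-mmapped space allocated from system: %llu\n",
           (unsigned long long) info.arena);
  #endif
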
diff --git a/sql/sql_truncate.cc b/sql/sql_truncate.cc
index c3d347307a2..c495a417961 100644
--- a/sql/sql_truncate.cc
+++ b/sql/sql_truncate.cc
@@ -416,20 +416,23 @@ bool Sql_cmd_truncate_table::truncate_table(THD *thd, TABLE_LIST *table_ref)
bool hton_can_recreate;
#ifdef WITH_WSREP
- if (WSREP(thd))
+ if (WSREP(thd) && wsrep_thd_is_local(thd))
{
wsrep::key_array keys;
- wsrep_append_fk_parent_table(thd, table_ref, &keys);
- if (keys.empty())
+ /* Do not start TOI if table is not found */
+ if (!wsrep_append_fk_parent_table(thd, table_ref, &keys))
{
- WSREP_TO_ISOLATION_BEGIN_IF(table_ref->db.str, table_ref->table_name.str, NULL)
+ if (keys.empty())
{
- DBUG_RETURN(TRUE);
- }
- } else {
- WSREP_TO_ISOLATION_BEGIN_FK_TABLES(NULL, NULL, table_ref, &keys)
- {
- DBUG_RETURN(TRUE);
+ WSREP_TO_ISOLATION_BEGIN_IF(table_ref->db.str, table_ref->table_name.str, NULL)
+ {
+ DBUG_RETURN(TRUE);
+ }
+ } else {
+ WSREP_TO_ISOLATION_BEGIN_FK_TABLES(NULL, NULL, table_ref, &keys)
+ {
+ DBUG_RETURN(TRUE);
+ }
}
}
}
@@ -462,6 +465,15 @@ bool Sql_cmd_truncate_table::truncate_table(THD *thd, TABLE_LIST *table_ref)
*/
error= handler_truncate(thd, table_ref, FALSE);
+ if (error == TRUNCATE_OK && thd->locked_tables_mode &&
+ (table_ref->table->file->ht->flags &
+ HTON_REQUIRES_CLOSE_AFTER_TRUNCATE))
+ {
+ thd->locked_tables_list.mark_table_for_reopen(thd, table_ref->table);
+ if (unlikely(thd->locked_tables_list.reopen_tables(thd, true)))
+ thd->locked_tables_list.unlink_all_closed_tables(thd, NULL, 0);
+ }
+
/*
All effects of a TRUNCATE TABLE operation are committed even if
truncation fails in the case of non transactional tables. Thus, the
diff --git a/sql/sql_tvc.cc b/sql/sql_tvc.cc
index 916047c4b0f..63e9e76e135 100644
--- a/sql/sql_tvc.cc
+++ b/sql/sql_tvc.cc
@@ -47,7 +47,7 @@ bool fix_fields_for_tvc(THD *thd, List_iterator_fast<List_item> &li)
while ((lst= li++))
{
- List_iterator_fast<Item> it(*lst);
+ List_iterator<Item> it(*lst);
Item *item;
while ((item= it++))
@@ -59,7 +59,7 @@ bool fix_fields_for_tvc(THD *thd, List_iterator_fast<List_item> &li)
while replacing their values to NAME_CONST()s.
So fix only those that have not been.
*/
- if (item->fix_fields_if_needed(thd, 0) ||
+ if (item->fix_fields_if_needed_for_scalar(thd, it.ref()) ||
item->check_is_evaluable_expression_or_error())
DBUG_RETURN(true);
}
@@ -341,6 +341,13 @@ int table_value_constr::save_explain_data_intern(THD *thd,
if (select_lex->master_unit()->derived)
explain->connection_type= Explain_node::EXPLAIN_NODE_DERIVED;
+ for (SELECT_LEX_UNIT *unit= select_lex->first_inner_unit();
+ unit;
+ unit= unit->next_unit())
+ {
+ explain->add_child(unit->first_select()->select_number);
+ }
+
output->add_node(explain);
if (select_lex->is_top_level_node())
@@ -365,9 +372,14 @@ bool table_value_constr::optimize(THD *thd)
thd->lex->explain && // for "SET" command in SPs.
(!thd->lex->explain->get_select(select_lex->select_number)))
{
- return save_explain_data_intern(thd, thd->lex->explain);
+ if (save_explain_data_intern(thd, thd->lex->explain))
+ return true;
}
- return 0;
+
+ if (select_lex->optimize_unflattened_subqueries(true))
+ return true;
+
+ return false;
}
@@ -635,50 +647,69 @@ st_select_lex *wrap_tvc(THD *thd, st_select_lex *tvc_sl,
st_select_lex *parent_select)
{
LEX *lex= thd->lex;
- select_result *save_result= thd->lex->result;
+ select_result *save_result= lex->result;
uint8 save_derived_tables= lex->derived_tables;
thd->lex->result= NULL;
Query_arena backup;
Query_arena *arena= thd->activate_stmt_arena_if_needed(&backup);
+
+ Item *item;
+ SELECT_LEX *wrapper_sl;
+ SELECT_LEX_UNIT *derived_unit;
+
/*
- Create SELECT_LEX of the select used in the result of transformation
+ Create SELECT_LEX wrapper_sl of the select used in the result
+ of the transformation
*/
- lex->current_select= tvc_sl;
- if (mysql_new_select(lex, 0, NULL))
+ if (!(wrapper_sl= new (thd->mem_root) SELECT_LEX()))
goto err;
- mysql_init_select(lex);
- /* Create item list as '*' for the subquery SQ */
- Item *item;
- SELECT_LEX *wrapper_sl;
- wrapper_sl= lex->current_select;
+ wrapper_sl->select_number= ++thd->lex->stmt_lex->current_select_number;
+ wrapper_sl->parent_lex= lex; /* Used in init_query. */
+ wrapper_sl->init_query();
+ wrapper_sl->init_select();
+
+ wrapper_sl->nest_level= tvc_sl->nest_level;
+ wrapper_sl->parsing_place= tvc_sl->parsing_place;
wrapper_sl->set_linkage(tvc_sl->get_linkage());
- wrapper_sl->parsing_place= SELECT_LIST;
+ wrapper_sl->exclude_from_table_unique_test=
+ tvc_sl->exclude_from_table_unique_test;
+
+ lex->current_select= wrapper_sl;
item= new (thd->mem_root) Item_field(thd, &wrapper_sl->context,
NULL, NULL, &star_clex_str);
if (item == NULL || add_item_to_list(thd, item))
goto err;
(wrapper_sl->with_wild)++;
-
- /* Exclude SELECT with TVC */
- tvc_sl->exclude();
+
+ /* Include the newly created select into the global list of selects */
+ wrapper_sl->include_global((st_select_lex_node**)&lex->all_selects_list);
+
+  /* Replace the select node used for the TVC with the newly created select */
+ tvc_sl->substitute_in_tree(wrapper_sl);
+
/*
- Create derived table DT that will wrap TVC in the result of transformation
+    Create a unit for the substituted select used for the TVC and attach it
+    to the wrapper select wrapper_sl as its only unit. The created
+ unit is the unit for the derived table tvc_x of the transformation.
*/
- SELECT_LEX *tvc_select; // select for tvc
- SELECT_LEX_UNIT *derived_unit; // unit for tvc_select
- if (mysql_new_select(lex, 1, tvc_sl))
+ if (!(derived_unit= new (thd->mem_root) SELECT_LEX_UNIT()))
goto err;
- tvc_select= lex->current_select;
- derived_unit= tvc_select->master_unit();
- tvc_select->set_linkage(DERIVED_TABLE_TYPE);
+ derived_unit->init_query();
+ derived_unit->thd= thd;
+ derived_unit->include_down(wrapper_sl);
- lex->current_select= wrapper_sl;
+ /*
+    Attach the select used for the TVC as the only slave to the unit for
+ the derived table tvc_x of the transformation
+ */
+ derived_unit->add_slave(tvc_sl);
+ tvc_sl->set_linkage(DERIVED_TABLE_TYPE);
/*
- Create the name of the wrapping derived table and
- add it to the FROM list of the wrapper
- */
+ Generate the name of the derived table created for TVC and
+ add it to the FROM list of the wrapping select
+ */
Table_ident *ti;
LEX_CSTRING alias;
TABLE_LIST *derived_tab;
@@ -697,19 +728,15 @@ st_select_lex *wrap_tvc(THD *thd, st_select_lex *tvc_sl,
wrapper_sl->table_list.first->derived_type= DTYPE_TABLE | DTYPE_MATERIALIZE;
lex->derived_tables|= DERIVED_SUBQUERY;
- wrapper_sl->where= 0;
- wrapper_sl->set_braces(false);
- derived_unit->set_with_clause(0);
-
if (arena)
thd->restore_active_arena(arena, &backup);
- thd->lex->result= save_result;
+ lex->result= save_result;
return wrapper_sl;
err:
if (arena)
thd->restore_active_arena(arena, &backup);
- thd->lex->result= save_result;
+ lex->result= save_result;
lex->derived_tables= save_derived_tables;
return 0;
}
@@ -778,11 +805,12 @@ st_select_lex *wrap_tvc_with_tail(THD *thd, st_select_lex *tvc_sl)
SELECT * FROM (VALUES (v1), ... (vn)) tvc_x
and replaces the subselect with the result of the transformation.
- @retval false if successfull
- true otherwise
+ @retval wrapping select if successful
+ 0 otherwise
*/
-bool Item_subselect::wrap_tvc_into_select(THD *thd, st_select_lex *tvc_sl)
+st_select_lex *
+Item_subselect::wrap_tvc_into_select(THD *thd, st_select_lex *tvc_sl)
{
LEX *lex= thd->lex;
/* SELECT_LEX object where the transformation is performed */
@@ -792,14 +820,9 @@ bool Item_subselect::wrap_tvc_into_select(THD *thd, st_select_lex *tvc_sl)
{
if (engine->engine_type() == subselect_engine::SINGLE_SELECT_ENGINE)
((subselect_single_select_engine *) engine)->change_select(wrapper_sl);
- lex->current_select= wrapper_sl;
- return false;
- }
- else
- {
- lex->current_select= parent_select;
- return true;
}
+ lex->current_select= parent_select;
+ return wrapper_sl;
}
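For orientation, the rewrite that wrap_tvc() and Item_subselect::wrap_tvc_into_select() implement, stated in SQL terms; the derived-table alias tvc_0 is illustrative, the server generates its own name.

    -- a table value constructor used as a subquery
    SELECT * FROM t1 WHERE a IN (VALUES (1),(2),(3));
    -- is processed as if the VALUES list were wrapped into a derived table:
    SELECT * FROM t1 WHERE a IN (SELECT * FROM (VALUES (1),(2),(3)) tvc_0);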
diff --git a/sql/sql_type.cc b/sql/sql_type.cc
index 36f128a3a0b..e92dc97771e 100644
--- a/sql/sql_type.cc
+++ b/sql/sql_type.cc
@@ -2671,9 +2671,12 @@ bool Type_handler::
MEM_ROOT *mem_root,
Column_definition *def,
handler *file,
- ulonglong table_flags) const
+ ulonglong table_flags,
+ const Column_derived_attributes
+ *derived_attr)
+ const
{
- def->create_length_to_internal_length_simple();
+ def->prepare_stage1_simple(&my_charset_bin);
return false;
}
@@ -2682,8 +2685,12 @@ bool Type_handler_null::
MEM_ROOT *mem_root,
Column_definition *def,
handler *file,
- ulonglong table_flags) const
+ ulonglong table_flags,
+ const Column_derived_attributes
+ *derived_attr)
+ const
{
+ def->prepare_charset_for_string(derived_attr);
def->create_length_to_internal_length_null();
return false;
}
@@ -2693,19 +2700,56 @@ bool Type_handler_row::
MEM_ROOT *mem_root,
Column_definition *def,
handler *file,
- ulonglong table_flags) const
+ ulonglong table_flags,
+ const Column_derived_attributes
+ *derived_attr)
+ const
{
+ def->charset= &my_charset_bin;
def->create_length_to_internal_length_null();
return false;
}
+bool Type_handler_temporal_result::
+ Column_definition_prepare_stage1(THD *thd,
+ MEM_ROOT *mem_root,
+ Column_definition *def,
+ handler *file,
+ ulonglong table_flags,
+ const Column_derived_attributes
+ *derived_attr)
+ const
+{
+ def->prepare_stage1_simple(&my_charset_numeric);
+ return false;
+}
+
+
+bool Type_handler_numeric::
+ Column_definition_prepare_stage1(THD *thd,
+ MEM_ROOT *mem_root,
+ Column_definition *def,
+ handler *file,
+ ulonglong table_flags,
+ const Column_derived_attributes
+ *derived_attr)
+ const
+{
+ def->prepare_stage1_simple(&my_charset_numeric);
+ return false;
+}
+
bool Type_handler_newdecimal::
Column_definition_prepare_stage1(THD *thd,
MEM_ROOT *mem_root,
Column_definition *def,
handler *file,
- ulonglong table_flags) const
+ ulonglong table_flags,
+ const Column_derived_attributes
+ *derived_attr)
+ const
{
+ def->charset= &my_charset_numeric;
def->create_length_to_internal_length_newdecimal();
return false;
}
@@ -2715,8 +2759,12 @@ bool Type_handler_bit::
MEM_ROOT *mem_root,
Column_definition *def,
handler *file,
- ulonglong table_flags) const
+ ulonglong table_flags,
+ const Column_derived_attributes
+ *derived_attr)
+ const
{
+ def->charset= &my_charset_numeric;
return def->prepare_stage1_bit(thd, mem_root, file, table_flags);
}
@@ -2725,9 +2773,13 @@ bool Type_handler_typelib::
MEM_ROOT *mem_root,
Column_definition *def,
handler *file,
- ulonglong table_flags) const
+ ulonglong table_flags,
+ const Column_derived_attributes
+ *derived_attr)
+ const
{
- return def->prepare_stage1_typelib(thd, mem_root, file, table_flags);
+ return def->prepare_charset_for_string(derived_attr) ||
+ def->prepare_stage1_typelib(thd, mem_root, file, table_flags);
}
@@ -2736,20 +2788,31 @@ bool Type_handler_string_result::
MEM_ROOT *mem_root,
Column_definition *def,
handler *file,
- ulonglong table_flags) const
+ ulonglong table_flags,
+ const Column_derived_attributes
+ *derived_attr)
+ const
{
- return def->prepare_stage1_string(thd, mem_root, file, table_flags);
+ return def->prepare_charset_for_string(derived_attr) ||
+ def->prepare_stage1_string(thd, mem_root, file, table_flags);
}
#ifdef HAVE_SPATIAL
+#if MYSQL_VERSION_ID > 100500
+#error The below method is in sql/sql_type_geom.cc starting from 10.5
+#endif
bool Type_handler_geometry::
Column_definition_prepare_stage1(THD *thd,
MEM_ROOT *mem_root,
Column_definition *def,
handler *file,
- ulonglong table_flags) const
+ ulonglong table_flags,
+ const Column_derived_attributes
+ *derived_attr)
+ const
{
+ def->charset= &my_charset_bin;
def->create_length_to_internal_length_string();
return def->prepare_blob_field(thd);
}
@@ -2758,14 +2821,38 @@ bool Type_handler_geometry::
/*************************************************************************/
+bool Type_handler_general_purpose_string::
+ Column_definition_bulk_alter(Column_definition *def,
+ const Column_derived_attributes
+ *derived_attr,
+ const Column_bulk_alter_attributes
+ *bulk_alter_attr)
+ const
+{
+ if (!bulk_alter_attr->alter_table_convert_to_charset())
+ return false; // No "CONVERT TO" clause.
+ CHARSET_INFO *defcs= def->explicit_or_derived_charset(derived_attr);
+ DBUG_ASSERT(defcs);
+ /*
+ Handle 'ALTER TABLE t1 CONVERT TO CHARACTER SET csname'.
+ Change character sets for all varchar/char/text columns,
+ but do not touch varbinary/binary/blob columns.
+ */
+ if (defcs != &my_charset_bin)
+ def->charset= bulk_alter_attr->alter_table_convert_to_charset();
+ return false;
+};
+
+
+/*************************************************************************/
+
bool Type_handler::
Column_definition_redefine_stage1(Column_definition *def,
const Column_definition *dup,
- const handler *file,
- const Schema_specification_st *schema)
+ const handler *file)
const
{
- def->redefine_stage1_common(dup, file, schema);
+ def->redefine_stage1_common(dup, file);
def->create_length_to_internal_length_simple();
return false;
}
@@ -2774,11 +2861,10 @@ bool Type_handler::
bool Type_handler_null::
Column_definition_redefine_stage1(Column_definition *def,
const Column_definition *dup,
- const handler *file,
- const Schema_specification_st *schema)
+ const handler *file)
const
{
- def->redefine_stage1_common(dup, file, schema);
+ def->redefine_stage1_common(dup, file);
def->create_length_to_internal_length_null();
return false;
}
@@ -2787,11 +2873,10 @@ bool Type_handler_null::
bool Type_handler_newdecimal::
Column_definition_redefine_stage1(Column_definition *def,
const Column_definition *dup,
- const handler *file,
- const Schema_specification_st *schema)
+ const handler *file)
const
{
- def->redefine_stage1_common(dup, file, schema);
+ def->redefine_stage1_common(dup, file);
def->create_length_to_internal_length_newdecimal();
return false;
}
@@ -2800,11 +2885,10 @@ bool Type_handler_newdecimal::
bool Type_handler_string_result::
Column_definition_redefine_stage1(Column_definition *def,
const Column_definition *dup,
- const handler *file,
- const Schema_specification_st *schema)
+ const handler *file)
const
{
- def->redefine_stage1_common(dup, file, schema);
+ def->redefine_stage1_common(dup, file);
def->set_compression_method(dup->compression_method());
def->create_length_to_internal_length_string();
return false;
@@ -2814,11 +2898,10 @@ bool Type_handler_string_result::
bool Type_handler_typelib::
Column_definition_redefine_stage1(Column_definition *def,
const Column_definition *dup,
- const handler *file,
- const Schema_specification_st *schema)
+ const handler *file)
const
{
- def->redefine_stage1_common(dup, file, schema);
+ def->redefine_stage1_common(dup, file);
def->create_length_to_internal_length_typelib();
return false;
}
@@ -2827,11 +2910,10 @@ bool Type_handler_typelib::
bool Type_handler_bit::
Column_definition_redefine_stage1(Column_definition *def,
const Column_definition *dup,
- const handler *file,
- const Schema_specification_st *schema)
+ const handler *file)
const
{
- def->redefine_stage1_common(dup, file, schema);
+ def->redefine_stage1_common(dup, file);
/*
If we are replacing a field with a BIT field, we need
to initialize pack_flag.
@@ -8633,11 +8715,18 @@ LEX_CSTRING Charset::collation_specific_name() const
for character sets and collations, so a collation
name not necessarily starts with the character set name.
*/
+ LEX_CSTRING retval;
size_t csname_length= strlen(m_charset->csname);
if (strncmp(m_charset->name, m_charset->csname, csname_length))
- return {NULL, 0};
+ {
+ retval.str= NULL;
+ retval.length= 0;
+ return retval;
+ }
const char *ptr= m_charset->name + csname_length;
- return {ptr, strlen(ptr) };
+ retval.str= ptr;
+ retval.length= strlen(ptr);
+ return retval;
}
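A short sketch of what the new Column_definition_bulk_alter() does, per the comment in the hunk above; table and column names are illustrative.

    CREATE TABLE t1 (a VARCHAR(10), b VARBINARY(10)) CHARACTER SET latin1;
    ALTER TABLE t1 CONVERT TO CHARACTER SET utf8;
    -- a is converted to utf8; b, being a binary column, keeps my_charset_bin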
diff --git a/sql/sql_type.h b/sql/sql_type.h
index f8ebc269788..7a514643bf6 100644
--- a/sql/sql_type.h
+++ b/sql/sql_type.h
@@ -76,7 +76,6 @@ class Spvar_definition;
struct st_value;
class Protocol;
class handler;
-struct Schema_specification_st;
struct TABLE;
struct SORT_FIELD_ATTR;
class Vers_history_point;
@@ -110,6 +109,53 @@ enum scalar_comparison_op
};
+/*
+ A helper class to store column attributes that are inherited
+ by columns (from the table level) when not specified explicitly.
+*/
+class Column_derived_attributes
+{
+ /*
+ Table level CHARACTER SET and COLLATE value:
+
+ CREATE TABLE t1 (a VARCHAR(1), b CHAR(2)) CHARACTER SET latin1;
+
+ All character string columns (CHAR, VARCHAR, TEXT)
+ inherit CHARACTER SET from the table level.
+ */
+ CHARSET_INFO *m_charset;
+public:
+ explicit Column_derived_attributes(CHARSET_INFO *cs)
+ :m_charset(cs)
+ { }
+ CHARSET_INFO *charset() const { return m_charset; }
+};
+
+
+/*
+ A helper class to store requests for changes
+ in multiple column data types during ALTER.
+*/
+class Column_bulk_alter_attributes
+{
+ /*
+ Target CHARACTER SET specification in ALTER .. CONVERT, e.g.
+
+ ALTER TABLE t1 CONVERT TO CHARACTER SET utf8;
+
+ All character string columns (CHAR, VARCHAR, TEXT)
+ get converted to the "CONVERT TO CHARACTER SET".
+ */
+ CHARSET_INFO *m_alter_table_convert_to_charset;
+public:
+ explicit Column_bulk_alter_attributes(CHARSET_INFO *convert)
+ :m_alter_table_convert_to_charset(convert)
+ { }
+ CHARSET_INFO *alter_table_convert_to_charset() const
+ { return m_alter_table_convert_to_charset; }
+};
+
+
class Native: public Binary_string
{
public:
@@ -3597,7 +3643,17 @@ public:
MEM_ROOT *mem_root,
Column_definition *c,
handler *file,
- ulonglong table_flags) const;
+ ulonglong table_flags,
+ const Column_derived_attributes
+ *derived_attr)
+ const;
+ virtual bool Column_definition_bulk_alter(Column_definition *c,
+ const Column_derived_attributes
+ *derived_attr,
+ const Column_bulk_alter_attributes
+ *bulk_alter_attr)
+ const
+ { return false; }
/*
This method is called on queries like:
CREATE TABLE t2 (a INT) AS SELECT a FROM t1;
@@ -3616,9 +3672,7 @@ public:
*/
virtual bool Column_definition_redefine_stage1(Column_definition *def,
const Column_definition *dup,
- const handler *file,
- const Schema_specification_st *
- schema)
+ const handler *file)
const;
virtual bool Column_definition_prepare_stage2(Column_definition *c,
handler *file,
@@ -3655,8 +3709,6 @@ public:
const Type_std_attributes *item,
SORT_FIELD_ATTR *attr) const= 0;
- virtual bool is_packable() const { return false; }
-
virtual uint32 max_display_length(const Item *item) const= 0;
virtual uint32 Item_decimal_notation_int_digits(const Item *item) const { return 0; }
virtual uint32 calc_pack_length(uint32 length) const= 0;
@@ -4010,11 +4062,13 @@ public:
MEM_ROOT *mem_root,
Column_definition *c,
handler *file,
- ulonglong table_flags) const;
+ ulonglong table_flags,
+ const Column_derived_attributes
+ *derived_attr)
+ const;
bool Column_definition_redefine_stage1(Column_definition *def,
const Column_definition *dup,
- const handler *file,
- const Schema_specification_st *schema)
+ const handler *file)
const
{
DBUG_ASSERT(0);
@@ -4296,6 +4350,14 @@ class Type_handler_numeric: public Type_handler
{
public:
String *print_item_value(THD *thd, Item *item, String *str) const;
+ bool Column_definition_prepare_stage1(THD *thd,
+ MEM_ROOT *mem_root,
+ Column_definition *c,
+ handler *file,
+ ulonglong table_flags,
+ const Column_derived_attributes
+ *derived_attr)
+ const;
double Item_func_min_max_val_real(Item_func_min_max *) const;
longlong Item_func_min_max_val_int(Item_func_min_max *) const;
my_decimal *Item_func_min_max_val_decimal(Item_func_min_max *,
@@ -4736,6 +4798,14 @@ public:
void sortlength(THD *thd,
const Type_std_attributes *item,
SORT_FIELD_ATTR *attr) const;
+ bool Column_definition_prepare_stage1(THD *thd,
+ MEM_ROOT *mem_root,
+ Column_definition *c,
+ handler *file,
+ ulonglong table_flags,
+ const Column_derived_attributes
+ *derived_attr)
+ const;
bool Item_const_eq(const Item_const *a, const Item_const *b,
bool binary_cmp) const;
bool Item_param_set_from_value(THD *thd,
@@ -4816,19 +4886,19 @@ public:
const Type_std_attributes *item,
SORT_FIELD_ATTR *attr) const;
- bool is_packable()const { return true; }
-
bool union_element_finalize(const Item * item) const;
bool Column_definition_prepare_stage1(THD *thd,
MEM_ROOT *mem_root,
Column_definition *c,
handler *file,
- ulonglong table_flags) const;
+ ulonglong table_flags,
+ const Column_derived_attributes
+ *derived_attr)
+ const;
bool Column_definition_redefine_stage1(Column_definition *def,
const Column_definition *dup,
- const handler *file,
- const Schema_specification_st *schema)
+ const handler *file)
const;
uint32 max_display_length(const Item *item) const;
/*
@@ -4939,6 +5009,12 @@ class Type_handler_general_purpose_string: public Type_handler_string_result
public:
bool is_general_purpose_string_type() const { return true; }
bool Vers_history_point_resolve_unit(THD *thd, Vers_history_point *p) const;
+ bool Column_definition_bulk_alter(Column_definition *c,
+ const Column_derived_attributes
+ *derived_attr,
+ const Column_bulk_alter_attributes
+ *bulk_alter_attr)
+ const;
};
@@ -5295,11 +5371,13 @@ public:
MEM_ROOT *mem_root,
Column_definition *c,
handler *file,
- ulonglong table_flags) const;
+ ulonglong table_flags,
+ const Column_derived_attributes
+ *derived_attr)
+ const;
bool Column_definition_redefine_stage1(Column_definition *def,
const Column_definition *dup,
- const handler *file,
- const Schema_specification_st *schema)
+ const handler *file)
const;
bool Column_definition_prepare_stage2(Column_definition *c,
handler *file,
@@ -5979,11 +6057,13 @@ public:
MEM_ROOT *mem_root,
Column_definition *c,
handler *file,
- ulonglong table_flags) const;
+ ulonglong table_flags,
+ const Column_derived_attributes
+ *derived_attr)
+ const;
bool Column_definition_redefine_stage1(Column_definition *def,
const Column_definition *dup,
- const handler *file,
- const Schema_specification_st *schema)
+ const handler *file)
const;
bool Column_definition_prepare_stage2(Column_definition *c,
handler *file,
@@ -6025,11 +6105,13 @@ public:
MEM_ROOT *mem_root,
Column_definition *c,
handler *file,
- ulonglong table_flags) const;
+ ulonglong table_flags,
+ const Column_derived_attributes
+ *derived_attr)
+ const;
bool Column_definition_redefine_stage1(Column_definition *def,
const Column_definition *dup,
- const handler *file,
- const Schema_specification_st *schema)
+ const handler *file)
const;
bool Column_definition_prepare_stage2(Column_definition *c,
handler *file,
@@ -6352,7 +6434,10 @@ public:
MEM_ROOT *mem_root,
Column_definition *c,
handler *file,
- ulonglong table_flags) const;
+ ulonglong table_flags,
+ const Column_derived_attributes
+ *derived_attr)
+ const;
bool Column_definition_prepare_stage2(Column_definition *c,
handler *file,
ulonglong table_flags) const;
@@ -6428,11 +6513,13 @@ public:
MEM_ROOT *mem_root,
Column_definition *c,
handler *file,
- ulonglong table_flags) const;
+ ulonglong table_flags,
+ const Column_derived_attributes
+ *derived_attr)
+ const;
bool Column_definition_redefine_stage1(Column_definition *def,
const Column_definition *dup,
- const handler *file,
- const Schema_specification_st *schema)
+ const handler *file)
const;
void Item_param_set_param_func(Item_param *param,
uchar **pos, ulong len) const;
diff --git a/sql/sql_union.cc b/sql/sql_union.cc
index 027d5882722..b295112b70d 100644
--- a/sql/sql_union.cc
+++ b/sql/sql_union.cc
@@ -405,7 +405,10 @@ select_union_recursive::create_result_table(THD *thd_arg,
hidden))
return true;
- if (! (incr_table= create_tmp_table(thd_arg, &tmp_table_param, *column_types,
+ incr_table_param.init();
+ incr_table_param.field_count= column_types->elements;
+ incr_table_param.bit_fields_as_long= bit_fields_as_long;
+ if (! (incr_table= create_tmp_table(thd_arg, &incr_table_param, *column_types,
(ORDER*) 0, false, 1,
options, HA_POS_ERROR, &empty_clex_str,
true, keep_row_order)))
@@ -415,20 +418,6 @@ select_union_recursive::create_result_table(THD *thd_arg,
for (uint i=0; i < table->s->fields; i++)
incr_table->field[i]->flags &= ~(PART_KEY_FLAG | PART_INDIRECT_KEY_FLAG);
- TABLE *rec_table= 0;
- if (! (rec_table= create_tmp_table(thd_arg, &tmp_table_param, *column_types,
- (ORDER*) 0, false, 1,
- options, HA_POS_ERROR, alias,
- true, keep_row_order)))
- return true;
-
- rec_table->keys_in_use_for_query.clear_all();
- for (uint i=0; i < table->s->fields; i++)
- rec_table->field[i]->flags &= ~(PART_KEY_FLAG | PART_INDIRECT_KEY_FLAG);
-
- if (rec_tables.push_back(rec_table))
- return true;
-
return false;
}
@@ -466,23 +455,25 @@ void select_union_recursive::cleanup()
free_tmp_table(thd, incr_table);
}
- List_iterator<TABLE> it(rec_tables);
- TABLE *tab;
- while ((tab= it++))
+ List_iterator<TABLE_LIST> it(rec_table_refs);
+ TABLE_LIST *tbl;
+ while ((tbl= it++))
{
+ TABLE *tab= tbl->table;
if (tab->is_created())
{
tab->file->extra(HA_EXTRA_RESET_STATE);
tab->file->ha_delete_all_rows();
}
- /*
+ /*
The table will be closed later in close_thread_tables(),
because it might be used in the statements like
ANALYZE WITH r AS (...) SELECT * from r
- where r is defined through recursion.
+ where r is defined through recursion.
*/
tab->next= thd->rec_tables;
thd->rec_tables= tab;
+ tbl->derived_result= 0;
}
}
@@ -1138,9 +1129,33 @@ bool st_select_lex_unit::prepare(TABLE_LIST *derived_arg,
goto err;
if (!derived_arg->table)
{
- derived_arg->table= with_element->rec_result->rec_tables.head();
- if (derived_arg->derived_result)
- derived_arg->derived_result->table= derived_arg->table;
+ bool res= false;
+
+ if ((!derived_arg->is_with_table_recursive_reference() ||
+ !derived_arg->derived_result) &&
+ !(derived_arg->derived_result=
+ new (thd->mem_root) select_unit(thd)))
+ goto err; // out of memory
+ thd->create_tmp_table_for_derived= TRUE;
+
+ res= derived_arg->derived_result->create_result_table(thd,
+ &types,
+ FALSE,
+ create_options,
+ &derived_arg->alias,
+ FALSE, FALSE,
+ FALSE, 0);
+ thd->create_tmp_table_for_derived= FALSE;
+ if (res)
+ goto err;
+ derived_arg->derived_result->set_unit(this);
+ derived_arg->table= derived_arg->derived_result->table;
+ if (derived_arg->is_with_table_recursive_reference())
+ {
+ /* Here 'derived_arg' is the primary recursive table reference */
+ derived_arg->with->rec_result->
+ rec_table_refs.push_back(derived_arg);
+ }
}
with_element->mark_as_with_prepared_anchor();
is_rec_result_table_created= true;
@@ -1783,11 +1798,11 @@ bool st_select_lex_unit::exec_recursive()
TABLE *incr_table= with_element->rec_result->incr_table;
st_select_lex *end= NULL;
bool is_unrestricted= with_element->is_unrestricted();
- List_iterator_fast<TABLE> li(with_element->rec_result->rec_tables);
+ List_iterator_fast<TABLE_LIST> li(with_element->rec_result->rec_table_refs);
TMP_TABLE_PARAM *tmp_table_param= &with_element->rec_result->tmp_table_param;
ha_rows examined_rows= 0;
bool was_executed= executed;
- TABLE *rec_table;
+ TABLE_LIST *rec_tbl;
DBUG_ENTER("st_select_lex_unit::exec_recursive");
@@ -1865,8 +1880,9 @@ bool st_select_lex_unit::exec_recursive()
else
with_element->level++;
- while ((rec_table= li++))
+ while ((rec_tbl= li++))
{
+ TABLE *rec_table= rec_tbl->table;
saved_error=
incr_table->insert_all_rows_into_tmp_table(thd, rec_table,
tmp_table_param,
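The select_union_recursive changes above drop the eager creation of the recursive reference tables and instead track them as TABLE_LIST references created later in st_select_lex_unit::prepare(); the statements served are of this shape (names illustrative), including the ANALYZE form mentioned in the cleanup comment.

    WITH RECURSIVE r AS (SELECT 1 AS n UNION ALL SELECT n+1 FROM r WHERE n < 3)
    SELECT * FROM r;
    ANALYZE WITH RECURSIVE r AS (SELECT 1 AS n UNION ALL SELECT n+1 FROM r WHERE n < 3)
    SELECT * FROM r;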
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index 9514b193407..41ea52ef3c3 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -1,5 +1,5 @@
/* Copyright (c) 2000, 2016, Oracle and/or its affiliates.
- Copyright (c) 2011, 2020, MariaDB
+ Copyright (c) 2011, 2021, MariaDB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -52,8 +52,9 @@
compare_record(TABLE*).
*/
bool records_are_comparable(const TABLE *table) {
- return ((table->file->ha_table_flags() & HA_PARTIAL_COLUMN_READ) == 0) ||
- bitmap_is_subset(table->write_set, table->read_set);
+ return !table->versioned(VERS_TRX_ID) &&
+ (((table->file->ha_table_flags() & HA_PARTIAL_COLUMN_READ) == 0) ||
+ bitmap_is_subset(table->write_set, table->read_set));
}
@@ -1081,20 +1082,9 @@ update_begin:
}
else if (likely(!error))
{
- if (has_vers_fields && table->versioned())
- {
- if (table->versioned(VERS_TIMESTAMP))
- {
- store_record(table, record[2]);
- table->mark_columns_per_binlog_row_image();
- error= vers_insert_history_row(table);
- restore_record(table, record[2]);
- }
- if (likely(!error))
- rows_inserted++;
- }
- if (likely(!error))
- updated++;
+ if (has_vers_fields && table->versioned(VERS_TRX_ID))
+ rows_inserted++;
+ updated++;
}
if (likely(!error) && !record_was_same && table_list->has_period())
@@ -1110,6 +1100,20 @@ update_begin:
if (unlikely(error) &&
(!ignore || table->file->is_fatal_error(error, HA_CHECK_ALL)))
{
+ goto error;
+ }
+ }
+
+ if (likely(!error) && has_vers_fields && table->versioned(VERS_TIMESTAMP))
+ {
+ store_record(table, record[2]);
+ table->mark_columns_per_binlog_row_image();
+ table->clone_handler_for_update();
+ error= vers_insert_history_row(table);
+ restore_record(table, record[2]);
+ if (unlikely(error))
+ {
+error:
/*
If (ignore && error is ignorable) we don't have to
do anything; otherwise...
@@ -1120,10 +1124,11 @@ update_begin:
flags|= ME_FATAL; /* Other handler errors are fatal */
prepare_record_for_error_message(error, table);
- table->file->print_error(error,MYF(flags));
- error= 1;
- break;
- }
+ table->file->print_error(error,MYF(flags));
+ error= 1;
+ break;
+ }
+ rows_inserted++;
}
if (table->triggers &&
@@ -1705,12 +1710,8 @@ bool Multiupdate_prelocking_strategy::handle_end(THD *thd)
call in setup_tables()).
*/
- if (setup_tables_and_check_access(thd, &select_lex->context,
- &select_lex->top_join_list, table_list, select_lex->leaf_tables,
- FALSE, UPDATE_ACL, SELECT_ACL, FALSE))
- DBUG_RETURN(1);
-
- if (select_lex->handle_derived(thd->lex, DT_MERGE))
+ if (setup_tables(thd, &select_lex->context, &select_lex->top_join_list,
+ table_list, select_lex->leaf_tables, FALSE, TRUE))
DBUG_RETURN(1);
List<Item> *fields= &lex->first_select_lex()->item_list;
@@ -1848,7 +1849,11 @@ int mysql_multi_update_prepare(THD *thd)
During prepare phase acquire only S metadata locks instead of SW locks to
keep prepare of multi-UPDATE compatible with concurrent LOCK TABLES WRITE
and global read lock.
+
+ Don't evaluate any subqueries even if constant, because
+ tables aren't locked yet.
*/
+ lex->context_analysis_only|= CONTEXT_ANALYSIS_ONLY_DERIVED;
if (thd->lex->sql_command == SQLCOM_UPDATE_MULTI)
{
if (open_tables(thd, &table_list, &table_count,
@@ -1871,6 +1876,9 @@ int mysql_multi_update_prepare(THD *thd)
{
DBUG_RETURN(TRUE);
}
+
+ lex->context_analysis_only&= ~CONTEXT_ANALYSIS_ONLY_DERIVED;
+
(void) read_statistics_for_tables_if_needed(thd, table_list);
/* @todo: downgrade the metadata locks here. */
@@ -1929,9 +1937,16 @@ bool mysql_multi_update(THD *thd, TABLE_LIST *table_list, List<Item> *fields,
DBUG_RETURN(TRUE);
}
+ if ((*result)->init(thd))
+ DBUG_RETURN(1);
+
thd->abort_on_warning= !ignore && thd->is_strict_mode();
List<Item> total_list;
+ if (setup_tables(thd, &select_lex->context, &select_lex->top_join_list,
+ table_list, select_lex->leaf_tables, FALSE, FALSE))
+ DBUG_RETURN(1);
+
if (select_lex->vers_setup_conds(thd, table_list))
DBUG_RETURN(1);
@@ -1973,6 +1988,24 @@ multi_update::multi_update(THD *thd_arg, TABLE_LIST *table_list,
}
+bool multi_update::init(THD *thd)
+{
+ table_map tables_to_update= get_table_map(fields);
+ List_iterator_fast<TABLE_LIST> li(*leaves);
+ TABLE_LIST *tbl;
+ while ((tbl =li++))
+ {
+ if (tbl->is_jtbm())
+ continue;
+ if (!(tbl->table->map & tables_to_update))
+ continue;
+ if (updated_leaves.push_back(tbl, thd->mem_root))
+ return true;
+ }
+ return false;
+}
+
+
/*
Connect fields with tables and create list of tables that are updated
*/
@@ -1989,7 +2022,7 @@ int multi_update::prepare(List<Item> &not_used_values,
List_iterator_fast<Item> value_it(*values);
uint i, max_fields;
uint leaf_table_count= 0;
- List_iterator<TABLE_LIST> ti(*leaves);
+ List_iterator<TABLE_LIST> ti(updated_leaves);
DBUG_ENTER("multi_update::prepare");
if (prepared)
@@ -2477,6 +2510,7 @@ int multi_update::send_data(List<Item> &not_used_values)
for (cur_table= update_tables; cur_table; cur_table= cur_table->next_local)
{
+ int error= 0;
TABLE *table= cur_table->table;
uint offset= cur_table->shared;
/*
@@ -2520,7 +2554,6 @@ int multi_update::send_data(List<Item> &not_used_values)
found++;
if (!can_compare_record || compare_record(table))
{
- int error;
if ((error= cur_table->view_check_option(thd, ignore)) !=
VIEW_CHECK_OK)
@@ -2547,20 +2580,7 @@ int multi_update::send_data(List<Item> &not_used_values)
updated--;
if (!ignore ||
table->file->is_fatal_error(error, HA_CHECK_ALL))
- {
- /*
- If (ignore && error == is ignorable) we don't have to
- do anything; otherwise...
- */
- myf flags= 0;
-
- if (table->file->is_fatal_error(error, HA_CHECK_ALL))
- flags|= ME_FATAL; /* Other handler errors are fatal */
-
- prepare_record_for_error_message(error, table);
- table->file->print_error(error,MYF(flags));
- DBUG_RETURN(1);
- }
+ goto error;
}
else
{
@@ -2569,19 +2589,8 @@ int multi_update::send_data(List<Item> &not_used_values)
error= 0;
updated--;
}
- else if (has_vers_fields && table->versioned())
+ else if (has_vers_fields && table->versioned(VERS_TRX_ID))
{
- if (table->versioned(VERS_TIMESTAMP))
- {
- store_record(table, record[2]);
- if (vers_insert_history_row(table))
- {
- restore_record(table, record[2]);
- error= 1;
- break;
- }
- restore_record(table, record[2]);
- }
updated_sys_ver++;
}
/* non-transactional or transactional table got modified */
@@ -2595,6 +2604,18 @@ int multi_update::send_data(List<Item> &not_used_values)
}
}
}
+ if (has_vers_fields && table->versioned(VERS_TIMESTAMP))
+ {
+ store_record(table, record[2]);
+ table->clone_handler_for_update();
+ if (unlikely(error= vers_insert_history_row(table)))
+ {
+ restore_record(table, record[2]);
+ goto error;
+ }
+ restore_record(table, record[2]);
+ updated_sys_ver++;
+ }
if (table->triggers &&
unlikely(table->triggers->process_triggers(thd, TRG_EVENT_UPDATE,
TRG_ACTION_AFTER, TRUE)))
@@ -2602,7 +2623,6 @@ int multi_update::send_data(List<Item> &not_used_values)
}
else
{
- int error;
TABLE *tmp_table= tmp_tables[offset];
if (copy_funcs(tmp_table_param[offset].items_to_copy, thd))
DBUG_RETURN(1);
@@ -2637,7 +2657,22 @@ int multi_update::send_data(List<Item> &not_used_values)
}
}
}
- }
+ continue;
+error:
+ DBUG_ASSERT(error > 0);
+ /*
+      If (ignore && error is ignorable) we don't have to
+ do anything; otherwise...
+ */
+ myf flags= 0;
+
+ if (table->file->is_fatal_error(error, HA_CHECK_ALL))
+ flags|= ME_FATAL; /* Other handler errors are fatal */
+
+ prepare_record_for_error_message(error, table);
+ table->file->print_error(error,MYF(flags));
+ DBUG_RETURN(1);
+ } // for (cur_table)
DBUG_RETURN(0);
}
@@ -3034,14 +3069,18 @@ bool multi_update::send_eof()
DBUG_ASSERT(trans_safe || !updated ||
thd->transaction.stmt.modified_non_trans_table);
- if (likely(local_error != 0))
- error_handled= TRUE; // to force early leave from ::abort_result_set()
-
- if (unlikely(local_error > 0)) // if the above log write did not fail ...
+ if (unlikely(local_error))
{
- /* Safety: If we haven't got an error before (can happen in do_updates) */
- my_message(ER_UNKNOWN_ERROR, "An error occurred in multi-table update",
- MYF(0));
+ error_handled= TRUE; // to force early leave from ::abort_result_set()
+ if (thd->killed == NOT_KILLED && !thd->get_stmt_da()->is_set())
+ {
+ /*
+ No error message was sent and query was not killed (in which case
+        mysql_execute_command() will send the error message).
+ */
+ my_message(ER_UNKNOWN_ERROR, "An error occurred in multi-table update",
+ MYF(0));
+ }
DBUG_RETURN(TRUE);
}
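The sql_update.cc hunks move history-row insertion for VERS_TIMESTAMP system-versioned tables to after the row update, via a cloned handler, in both single-table and multi-table UPDATE; a minimal scenario (names illustrative):

    CREATE TABLE t1 (a INT) WITH SYSTEM VERSIONING;
    INSERT INTO t1 VALUES (1);
    UPDATE t1 SET a= 2;   -- the old row version is written to history after the update succeeds
    SELECT a FROM t1 FOR SYSTEM_TIME ALL;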
diff --git a/sql/sql_view.cc b/sql/sql_view.cc
index 0a18a852832..126db90656c 100644
--- a/sql/sql_view.cc
+++ b/sql/sql_view.cc
@@ -292,6 +292,8 @@ bool create_view_precheck(THD *thd, TABLE_LIST *tables, TABLE_LIST *view,
{
for (tbl= sl->get_table_list(); tbl; tbl= tbl->next_local)
{
+ if (!tbl->with && tbl->select_lex)
+ tbl->with= tbl->select_lex->find_table_def_in_with_clauses(tbl);
/*
Ensure that we have some privileges on this table, more strict check
will be done on column level after preparation,
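The sql_view.cc hunk resolves WITH-clause definitions before the privilege precheck, presumably so that a CTE referenced in the view body is not treated as a base table during that check; a statement of the affected shape (names illustrative):

    CREATE VIEW v1 AS WITH cte AS (SELECT 1 AS a) SELECT a FROM cte;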
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index 47102c3c6aa..9b2355e53c5 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -829,7 +829,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize);
/*
We should not introduce any further shift/reduce conflicts.
*/
-%expect 54
+%expect 68
/*
Comments for TOKENS.
@@ -1808,7 +1808,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize);
%type <type_handler> int_type real_type
-%type <Lex_field_type> type_with_opt_collate field_type
+%type <Lex_field_type> field_type field_type_all
qualified_field_type
field_type_numeric
field_type_string
@@ -1879,7 +1879,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize);
%type <item>
literal insert_ident order_ident temporal_literal
simple_ident expr sum_expr in_sum_expr
- variable variable_aux bool_pri
+ variable variable_aux
predicate bit_expr parenthesized_expr
table_wild simple_expr column_default_non_parenthesized_expr udf_expr
primary_expr string_factor_expr mysql_concatenation_expr
@@ -3392,7 +3392,7 @@ sp_param_name:
;
sp_param_name_and_type:
- sp_param_name type_with_opt_collate
+ sp_param_name field_type
{
if (unlikely(Lex->sp_param_fill_definition($$= $1)))
MYSQL_YYABORT;
@@ -3522,7 +3522,7 @@ row_field_name:
;
row_field_definition:
- row_field_name type_with_opt_collate
+ row_field_name field_type
;
row_field_definition_list:
@@ -3551,7 +3551,7 @@ sp_decl_idents_init_vars:
sp_decl_variable_list:
sp_decl_idents_init_vars
- type_with_opt_collate
+ field_type
sp_opt_default
{
if (unlikely(Lex->sp_variable_declarations_finalize(thd, $1,
@@ -6888,19 +6888,26 @@ column_default_expr:
}
;
+field_type: field_type_all
+ {
+ Lex->map_data_type(Lex_ident_sys(), &($$= $1));
+ Lex->last_field->set_attributes($$, Lex->charset);
+ }
+ ;
+
qualified_field_type:
- field_type
+ field_type_all
{
Lex->map_data_type(Lex_ident_sys(), &($$= $1));
}
- | sp_decl_ident '.' field_type
+ | sp_decl_ident '.' field_type_all
{
if (Lex->map_data_type($1, &($$= $3)))
MYSQL_YYABORT;
}
;
-field_type:
+field_type_all:
field_type_numeric
| field_type_temporal
| field_type_string
@@ -7363,20 +7370,6 @@ with_or_without_system:
;
-type_with_opt_collate:
- field_type opt_collate
- {
- Lex->map_data_type(Lex_ident_sys(), &($$= $1));
-
- if ($2)
- {
- if (unlikely(!(Lex->charset= merge_charset_and_collation(Lex->charset, $2))))
- MYSQL_YYABORT;
- }
- Lex->last_field->set_attributes($$, Lex->charset);
- }
- ;
-
charset:
CHAR_SYM SET {}
| CHARSET {}
@@ -7450,6 +7443,12 @@ charset_or_alias:
}
;
+collate: COLLATE_SYM collation_name_or_default
+ {
+ Lex->charset= $2;
+ }
+ ;
+
opt_binary:
/* empty */ { bincmp_collation(NULL, false); }
| binary {}
@@ -7460,6 +7459,13 @@ binary:
| charset_or_alias opt_bin_mod { bincmp_collation($1, $2); }
| BINARY { bincmp_collation(NULL, true); }
| BINARY charset_or_alias { bincmp_collation($2, true); }
+ | charset_or_alias collate
+ {
+ if (!my_charset_same(Lex->charset, $1))
+ my_yyabort_error((ER_COLLATION_CHARSET_MISMATCH, MYF(0),
+ Lex->charset->name, $1->csname));
+ }
+ | collate { }
;
opt_bin_mod:
@@ -10064,23 +10070,19 @@ expr:
if (unlikely($$ == NULL))
MYSQL_YYABORT;
}
- | bool_pri
- ;
-
-bool_pri:
- bool_pri EQUAL_SYM predicate %prec EQUAL_SYM
+ | expr EQUAL_SYM predicate %prec EQUAL_SYM
{
$$= new (thd->mem_root) Item_func_equal(thd, $1, $3);
if (unlikely($$ == NULL))
MYSQL_YYABORT;
}
- | bool_pri comp_op predicate %prec '='
+ | expr comp_op predicate %prec '='
{
$$= (*$2)(0)->create(thd, $1, $3);
if (unlikely($$ == NULL))
MYSQL_YYABORT;
}
- | bool_pri comp_op all_or_any '(' subselect ')' %prec '='
+ | expr comp_op all_or_any '(' subselect ')' %prec '='
{
$$= all_any_subquery_creator(thd, $1, $2, $3, $5);
if (unlikely($$ == NULL))
@@ -14993,7 +14995,7 @@ kill:
lex->sql_command= SQLCOM_KILL;
lex->kill_type= KILL_TYPE_ID;
}
- kill_type kill_option kill_expr
+ kill_type kill_option
{
Lex->kill_signal= (killed_state) ($3 | $4);
}
@@ -15006,16 +15008,21 @@ kill_type:
;
kill_option:
- /* empty */ { $$= (int) KILL_CONNECTION; }
- | CONNECTION_SYM { $$= (int) KILL_CONNECTION; }
- | QUERY_SYM { $$= (int) KILL_QUERY; }
- | QUERY_SYM ID_SYM
+ opt_connection kill_expr { $$= (int) KILL_CONNECTION; }
+ | QUERY_SYM kill_expr { $$= (int) KILL_QUERY; }
+ | QUERY_SYM ID_SYM expr
{
$$= (int) KILL_QUERY;
Lex->kill_type= KILL_TYPE_QUERY;
+ Lex->value_list.push_front($3, thd->mem_root);
}
;
+opt_connection:
+ /* empty */ { }
+ | CONNECTION_SYM { }
+ ;
+
kill_expr:
expr
{
@@ -15028,7 +15035,6 @@ kill_expr:
}
;
-
shutdown:
SHUTDOWN { Lex->sql_command= SQLCOM_SHUTDOWN; }
shutdown_option {}
@@ -18028,7 +18034,7 @@ sf_return_type:
&empty_clex_str,
thd->variables.collation_database);
}
- type_with_opt_collate
+ field_type
{
if (unlikely(Lex->sphead->fill_field_definition(thd,
Lex->last_field)))
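The sql_yacc.yy changes fold the kill target expression into kill_option and introduce a standalone collate rule inside binary; the KILL forms accepted by the new grammar, for reference:

    KILL 42;             -- implicit CONNECTION
    KILL CONNECTION 42;
    KILL QUERY 42;
    KILL QUERY ID 1234;  -- targets a query id rather than a thread id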
diff --git a/sql/sql_yacc_ora.yy b/sql/sql_yacc_ora.yy
index ac4929468b1..cb58c4aff43 100644
--- a/sql/sql_yacc_ora.yy
+++ b/sql/sql_yacc_ora.yy
@@ -305,7 +305,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize);
/*
We should not introduce any further shift/reduce conflicts.
*/
-%expect 57
+%expect 70
/*
Comments for TOKENS.
@@ -1288,9 +1288,9 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize);
%type <type_handler> int_type real_type
-%type <Lex_field_type> type_with_opt_collate field_type
+%type <Lex_field_type> field_type field_type_all
qualified_field_type
- sp_param_type_with_opt_collate
+ sp_param_type
sp_param_field_type
sp_param_field_type_string
field_type_numeric
@@ -1362,7 +1362,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize);
%type <item>
literal insert_ident order_ident temporal_literal
simple_ident expr sum_expr in_sum_expr
- variable variable_aux bool_pri
+ variable variable_aux
predicate bit_expr parenthesized_expr
table_wild simple_expr column_default_non_parenthesized_expr udf_expr
primary_expr string_factor_expr mysql_concatenation_expr
@@ -3194,7 +3194,7 @@ sp_param_name:
;
sp_param_name_and_type:
- sp_param_name sp_param_type_with_opt_collate
+ sp_param_name sp_param_type
{
if (unlikely(Lex->sp_param_fill_definition($$= $1)))
MYSQL_YYABORT;
@@ -3238,7 +3238,7 @@ sp_pdparams:
;
sp_pdparam:
- sp_param_name sp_opt_inout sp_param_type_with_opt_collate
+ sp_param_name sp_opt_inout sp_param_type
{
$1->mode= $2;
if (unlikely(Lex->sp_param_fill_definition($1)))
@@ -3407,7 +3407,7 @@ row_field_name:
;
row_field_definition:
- row_field_name type_with_opt_collate
+ row_field_name field_type
;
row_field_definition_list:
@@ -3436,7 +3436,7 @@ sp_decl_idents_init_vars:
sp_decl_vars:
sp_decl_idents_init_vars
- type_with_opt_collate
+ field_type
sp_opt_default
{
if (unlikely(Lex->sp_variable_declarations_finalize(thd, $1,
@@ -6891,19 +6891,26 @@ column_default_expr:
}
;
+field_type: field_type_all
+ {
+ Lex->map_data_type(Lex_ident_sys(), &($$= $1));
+ Lex->last_field->set_attributes($$, Lex->charset);
+ }
+ ;
+
qualified_field_type:
- field_type
+ field_type_all
{
Lex->map_data_type(Lex_ident_sys(), &($$= $1));
}
- | sp_decl_ident '.' field_type
+ | sp_decl_ident '.' field_type_all
{
if (Lex->map_data_type($1, &($$= $3)))
MYSQL_YYABORT;
}
;
-field_type:
+field_type_all:
field_type_numeric
| field_type_temporal
| field_type_string
@@ -7445,30 +7452,10 @@ with_or_without_system:
;
-type_with_opt_collate:
- field_type opt_collate
- {
- Lex->map_data_type(Lex_ident_sys(), &($$= $1));
-
- if ($2)
- {
- if (unlikely(!(Lex->charset= merge_charset_and_collation(Lex->charset, $2))))
- MYSQL_YYABORT;
- }
- Lex->last_field->set_attributes($$, Lex->charset);
- }
- ;
-
-sp_param_type_with_opt_collate:
- sp_param_field_type opt_collate
+sp_param_type:
+ sp_param_field_type
{
Lex->map_data_type(Lex_ident_sys(), &($$= $1));
-
- if ($2)
- {
- if (unlikely(!(Lex->charset= merge_charset_and_collation(Lex->charset, $2))))
- MYSQL_YYABORT;
- }
Lex->last_field->set_attributes($$, Lex->charset);
}
;
@@ -7546,6 +7533,12 @@ charset_or_alias:
}
;
+collate: COLLATE_SYM collation_name_or_default
+ {
+ Lex->charset= $2;
+ }
+ ;
+
opt_binary:
/* empty */ { bincmp_collation(NULL, false); }
| binary {}
@@ -7556,6 +7549,13 @@ binary:
| charset_or_alias opt_bin_mod { bincmp_collation($1, $2); }
| BINARY { bincmp_collation(NULL, true); }
| BINARY charset_or_alias { bincmp_collation($2, true); }
+ | charset_or_alias collate
+ {
+ if (!my_charset_same(Lex->charset, $1))
+ my_yyabort_error((ER_COLLATION_CHARSET_MISMATCH, MYF(0),
+ Lex->charset->name, $1->csname));
+ }
+ | collate { }
;
opt_bin_mod:
@@ -10168,23 +10168,19 @@ expr:
if (unlikely($$ == NULL))
MYSQL_YYABORT;
}
- | bool_pri
- ;
-
-bool_pri:
- bool_pri EQUAL_SYM predicate %prec EQUAL_SYM
+ | expr EQUAL_SYM predicate %prec EQUAL_SYM
{
$$= new (thd->mem_root) Item_func_equal(thd, $1, $3);
if (unlikely($$ == NULL))
MYSQL_YYABORT;
}
- | bool_pri comp_op predicate %prec '='
+ | expr comp_op predicate %prec '='
{
$$= (*$2)(0)->create(thd, $1, $3);
if (unlikely($$ == NULL))
MYSQL_YYABORT;
}
- | bool_pri comp_op all_or_any '(' subselect ')' %prec '='
+ | expr comp_op all_or_any '(' subselect ')' %prec '='
{
$$= all_any_subquery_creator(thd, $1, $2, $3, $5);
if (unlikely($$ == NULL))
@@ -15117,7 +15113,7 @@ kill:
lex->sql_command= SQLCOM_KILL;
lex->kill_type= KILL_TYPE_ID;
}
- kill_type kill_option kill_expr
+ kill_type kill_option
{
Lex->kill_signal= (killed_state) ($3 | $4);
}
@@ -15130,16 +15126,21 @@ kill_type:
;
kill_option:
- /* empty */ { $$= (int) KILL_CONNECTION; }
- | CONNECTION_SYM { $$= (int) KILL_CONNECTION; }
- | QUERY_SYM { $$= (int) KILL_QUERY; }
- | QUERY_SYM ID_SYM
+ opt_connection kill_expr { $$= (int) KILL_CONNECTION; }
+ | QUERY_SYM kill_expr { $$= (int) KILL_QUERY; }
+ | QUERY_SYM ID_SYM expr
{
$$= (int) KILL_QUERY;
Lex->kill_type= KILL_TYPE_QUERY;
+ Lex->value_list.push_front($3, thd->mem_root);
}
;
+opt_connection:
+ /* empty */ { }
+ | CONNECTION_SYM { }
+ ;
+
kill_expr:
expr
{
@@ -15152,7 +15153,6 @@ kill_expr:
}
;
-
shutdown:
SHUTDOWN { Lex->sql_command= SQLCOM_SHUTDOWN; }
shutdown_option {}
@@ -18240,7 +18240,7 @@ sf_return_type:
&empty_clex_str,
thd->variables.collation_database);
}
- sp_param_type_with_opt_collate
+ sp_param_type
{
if (unlikely(Lex->sphead->fill_field_definition(thd,
Lex->last_field)))
diff --git a/sql/strfunc.cc b/sql/strfunc.cc
index 99ff9c50588..58647c21e44 100644
--- a/sql/strfunc.cc
+++ b/sql/strfunc.cc
@@ -79,14 +79,17 @@ ulonglong find_set(TYPELIB *lib, const char *str, size_t length, CHARSET_INFO *c
var_len= (uint) (pos - start);
uint find= cs ? find_type2(lib, start, var_len, cs) :
find_type(lib, start, var_len, (bool) 0);
- if (unlikely(!find && *err_len == 0))
+ if (unlikely(!find))
{
- // report the first error with length > 0
- *err_pos= (char*) start;
- *err_len= var_len;
- *set_warning= 1;
+ if (*err_len == 0)
+ {
+ // report the first error with length > 0
+ *err_pos= (char*) start;
+ *err_len= var_len;
+ *set_warning= 1;
+ }
}
- else
+ else if (find <= sizeof(longlong) * 8)
found|= 1ULL << (find - 1);
if (pos >= end)
break;
@@ -400,4 +403,3 @@ const char *flagset_to_string(THD *thd, LEX_CSTRING *result, ulonglong set,
return result->str;
}
-
diff --git a/sql/structs.h b/sql/structs.h
index 28c0cb6e1a2..215fe7b60ea 100644
--- a/sql/structs.h
+++ b/sql/structs.h
@@ -92,7 +92,7 @@ class engine_option_value;
struct ha_index_option_struct;
typedef struct st_key {
- uint key_length; /* Tot length of key */
+ uint key_length; /* total length of user defined key parts */
ulong flags; /* dupp key and pack flags */
uint user_defined_key_parts; /* How many key_parts */
uint usable_key_parts; /* Should normally be = user_defined_key_parts */
diff --git a/sql/sys_vars.cc b/sql/sys_vars.cc
index 4c3028b0a00..c52a8f742a8 100644
--- a/sql/sys_vars.cc
+++ b/sql/sys_vars.cc
@@ -2352,7 +2352,7 @@ static Sys_var_ulong Sys_max_sort_length(
"the first max_sort_length bytes of each value are used; the rest "
"are ignored)",
SESSION_VAR(max_sort_length), CMD_LINE(REQUIRED_ARG),
- VALID_RANGE(8, 8192*1024L), DEFAULT(1024), BLOCK_SIZE(1));
+ VALID_RANGE(64, 8192*1024L), DEFAULT(1024), BLOCK_SIZE(1));
static Sys_var_ulong Sys_max_sp_recursion_depth(
"max_sp_recursion_depth",
@@ -4570,11 +4570,11 @@ static Sys_var_ulong Sys_default_week_format(
SESSION_VAR(default_week_format), CMD_LINE(REQUIRED_ARG),
VALID_RANGE(0, 7), DEFAULT(0), BLOCK_SIZE(1));
-static Sys_var_ulonglong Sys_group_concat_max_len(
+static Sys_var_uint Sys_group_concat_max_len(
"group_concat_max_len",
"The maximum length of the result of function GROUP_CONCAT()",
SESSION_VAR(group_concat_max_len), CMD_LINE(REQUIRED_ARG),
- VALID_RANGE(4, SIZE_T_MAX), DEFAULT(1024*1024), BLOCK_SIZE(1));
+ VALID_RANGE(4, UINT_MAX32), DEFAULT(1024*1024), BLOCK_SIZE(1));
static char *glob_hostname_ptr;
static Sys_var_charptr Sys_hostname(
@@ -4834,6 +4834,24 @@ static Sys_var_have Sys_have_symlink(
"--skip-symbolic-links option.",
READ_ONLY GLOBAL_VAR(have_symlink), NO_CMD_LINE);
+#if defined(__SANITIZE_ADDRESS__) || defined(WITH_UBSAN)
+
+#ifdef __SANITIZE_ADDRESS__
+#define SANITIZER_MODE "ASAN"
+#else
+#define SANITIZER_MODE "UBSAN"
+#endif /* __SANITIZE_ADDRESS__ */
+
+static char *have_sanitizer;
+static Sys_var_charptr Sys_have_santitizer(
+ "have_sanitizer",
+ "If the server is compiled with sanitize (compiler option), this "
+ "variable is set to the sanitizer mode used. Possible values are "
+ "ASAN (Address sanitizer) or UBSAN (The Undefined Behavior Sanitizer).",
+ READ_ONLY GLOBAL_VAR(have_sanitizer), NO_CMD_LINE,
+ IN_FS_CHARSET, DEFAULT(SANITIZER_MODE));
+#endif /* defined(__SANITIZE_ADDRESS__) || defined(WITH_UBSAN) */
+
static bool fix_log_state(sys_var *self, THD *thd, enum_var_type type);
static Sys_var_mybool Sys_general_log(
@@ -5418,7 +5436,7 @@ static Sys_var_tz Sys_time_zone(
static Sys_var_charptr Sys_wsrep_provider(
"wsrep_provider", "Path to replication provider library",
- PREALLOCATED GLOBAL_VAR(wsrep_provider), CMD_LINE(REQUIRED_ARG),
+ PREALLOCATED READ_ONLY GLOBAL_VAR(wsrep_provider), CMD_LINE(REQUIRED_ARG),
IN_FS_CHARSET, DEFAULT(WSREP_NONE),
NO_MUTEX_GUARD, NOT_IN_BINLOG,
ON_CHECK(wsrep_provider_check), ON_UPDATE(wsrep_provider_update));
@@ -5445,13 +5463,12 @@ static Sys_var_charptr Sys_wsrep_cluster_name(
ON_CHECK(wsrep_cluster_name_check),
ON_UPDATE(wsrep_cluster_name_update));
-static PolyLock_mutex PLock_wsrep_cluster_config(&LOCK_wsrep_cluster_config);
static Sys_var_charptr Sys_wsrep_cluster_address (
"wsrep_cluster_address", "Address to initially connect to cluster",
PREALLOCATED GLOBAL_VAR(wsrep_cluster_address),
CMD_LINE(REQUIRED_ARG),
IN_SYSTEM_CHARSET, DEFAULT(""),
- &PLock_wsrep_cluster_config, NOT_IN_BINLOG,
+ NO_MUTEX_GUARD, NOT_IN_BINLOG,
ON_CHECK(wsrep_cluster_address_check),
ON_UPDATE(wsrep_cluster_address_update));
@@ -5482,7 +5499,7 @@ static Sys_var_ulong Sys_wsrep_slave_threads(
"wsrep_slave_threads", "Number of slave appliers to launch",
GLOBAL_VAR(wsrep_slave_threads), CMD_LINE(REQUIRED_ARG),
VALID_RANGE(1, 512), DEFAULT(1), BLOCK_SIZE(1),
- &PLock_wsrep_cluster_config, NOT_IN_BINLOG,
+ NO_MUTEX_GUARD, NOT_IN_BINLOG,
ON_CHECK(0),
ON_UPDATE(wsrep_slave_threads_update));
@@ -5635,7 +5652,7 @@ static Sys_var_ulong Sys_wsrep_max_ws_rows (
static Sys_var_charptr Sys_wsrep_notify_cmd(
"wsrep_notify_cmd", "",
- GLOBAL_VAR(wsrep_notify_cmd),CMD_LINE(REQUIRED_ARG),
+ READ_ONLY GLOBAL_VAR(wsrep_notify_cmd), CMD_LINE(REQUIRED_ARG),
IN_SYSTEM_CHARSET, DEFAULT(""));
static Sys_var_mybool Sys_wsrep_certify_nonPK(
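Statements affected by the sys_vars.cc hunks above, for reference (values illustrative):

    SHOW GLOBAL VARIABLES LIKE 'have_sanitizer';   -- present only in ASAN/UBSAN builds
    SET GLOBAL group_concat_max_len= 4294967295;   -- now a 32-bit variable capped at UINT_MAX32
    SET SESSION max_sort_length= 64;               -- 64 is the new minimum (previously 8)
    SET GLOBAL wsrep_provider= 'none';             -- now rejected, the variable became read-only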
diff --git a/sql/table.cc b/sql/table.cc
index 084b441e4c6..12299271ab3 100644
--- a/sql/table.cc
+++ b/sql/table.cc
@@ -1127,7 +1127,7 @@ bool parse_vcol_defs(THD *thd, MEM_ROOT *mem_root, TABLE *table,
thd->stmt_arena= table->expr_arena;
thd->update_charset(&my_charset_utf8mb4_general_ci, table->s->table_charset);
expr_str.append(&parse_vcol_keyword);
- thd->variables.sql_mode &= ~MODE_NO_BACKSLASH_ESCAPES;
+ thd->variables.sql_mode &= ~(MODE_NO_BACKSLASH_ESCAPES | MODE_EMPTY_STRING_IS_NULL);
while (pos < end)
{
@@ -2730,7 +2730,8 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
else
key_part->key_part_flag|= HA_VAR_LENGTH_PART;
key_part->store_length+=HA_KEY_BLOB_LENGTH;
- keyinfo->key_length+= HA_KEY_BLOB_LENGTH;
+ if (i < keyinfo->user_defined_key_parts)
+ keyinfo->key_length+= HA_KEY_BLOB_LENGTH;
}
if (field->type() == MYSQL_TYPE_BIT)
key_part->key_part_flag|= HA_BIT_PART;
@@ -2827,7 +2828,6 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
set_if_bigger(share->max_key_length,keyinfo->key_length+
keyinfo->user_defined_key_parts);
- share->total_key_length+= keyinfo->key_length;
/*
MERGE tables do not have unique indexes. But every key could be
an unique index on the underlying MyISAM table. (Bug #10400)
@@ -3210,9 +3210,8 @@ ret:
if (unlikely(thd->is_error() || error))
{
thd->clear_error();
- my_error(ER_SQL_DISCOVER_ERROR, MYF(0),
- plugin_name(db_plugin)->str, db.str, table_name.str,
- sql_copy);
+ my_error(ER_SQL_DISCOVER_ERROR, MYF(0), hton_name(hton)->str,
+ db.str, table_name.str, sql_copy);
DBUG_RETURN(HA_ERR_GENERIC);
}
/* Treat the table as normal table from binary logging point of view */
@@ -4480,7 +4479,7 @@ void update_create_info_from_table(HA_CREATE_INFO *create_info, TABLE *table)
create_info->row_type= share->row_type;
create_info->key_block_size= share->key_block_size;
create_info->default_table_charset= share->table_charset;
- create_info->table_charset= 0;
+ create_info->alter_table_convert_to_charset= 0;
create_info->comment= share->comment;
create_info->transactional= share->transactional;
create_info->page_checksum= share->page_checksum;
diff --git a/sql/table.h b/sql/table.h
index 6073e35fa85..4a739ed1f9f 100644
--- a/sql/table.h
+++ b/sql/table.h
@@ -806,7 +806,7 @@ struct TABLE_SHARE
uint rec_buff_length; /* Size of table->record[] buffer */
uint keys, key_parts;
uint ext_key_parts; /* Total number of key parts in extended keys */
- uint max_key_length, max_unique_length, total_key_length;
+ uint max_key_length, max_unique_length;
uint uniques; /* Number of UNIQUE index */
uint db_create_options; /* Create options from database */
uint db_options_in_use; /* Options in use */
@@ -2577,8 +2577,12 @@ struct TABLE_LIST
Indicates what triggers we need to pre-load for this TABLE_LIST
when opening an associated TABLE. This is filled after
the parsed tree is created.
+
+    slave_fk_event_map is filled on the slave side with a bitmap value
+    representing the row-based event operation, to help find and prelock
+    possible FK constraint-related child tables.
*/
- uint8 trg_event_map;
+ uint8 trg_event_map, slave_fk_event_map;
/* TRUE <=> this table is a const one and was optimized away. */
bool optimized_away;
@@ -3083,25 +3087,25 @@ typedef struct st_open_table_list{
} OPEN_TABLE_LIST;
-static inline my_bitmap_map *tmp_use_all_columns(TABLE *table,
- MY_BITMAP *bitmap)
+static inline MY_BITMAP *tmp_use_all_columns(TABLE *table,
+ MY_BITMAP **bitmap)
{
- my_bitmap_map *old= bitmap->bitmap;
- bitmap->bitmap= table->s->all_set.bitmap;
+ MY_BITMAP *old= *bitmap;
+ *bitmap= &table->s->all_set;
return old;
}
-static inline void tmp_restore_column_map(MY_BITMAP *bitmap,
- my_bitmap_map *old)
+static inline void tmp_restore_column_map(MY_BITMAP **bitmap,
+ MY_BITMAP *old)
{
- bitmap->bitmap= old;
+ *bitmap= old;
}
/* The following is only needed for debugging */
-static inline my_bitmap_map *dbug_tmp_use_all_columns(TABLE *table,
- MY_BITMAP *bitmap)
+static inline MY_BITMAP *dbug_tmp_use_all_columns(TABLE *table,
+ MY_BITMAP **bitmap)
{
#ifdef DBUG_ASSERT_EXISTS
return tmp_use_all_columns(table, bitmap);
@@ -3110,8 +3114,8 @@ static inline my_bitmap_map *dbug_tmp_use_all_columns(TABLE *table,
#endif
}
-static inline void dbug_tmp_restore_column_map(MY_BITMAP *bitmap,
- my_bitmap_map *old)
+static inline void dbug_tmp_restore_column_map(MY_BITMAP **bitmap,
+ MY_BITMAP *old)
{
#ifdef DBUG_ASSERT_EXISTS
tmp_restore_column_map(bitmap, old);
@@ -3124,22 +3128,22 @@ static inline void dbug_tmp_restore_column_map(MY_BITMAP *bitmap,
Provide for the possiblity of the read set being the same as the write set
*/
static inline void dbug_tmp_use_all_columns(TABLE *table,
- my_bitmap_map **save,
- MY_BITMAP *read_set,
- MY_BITMAP *write_set)
+ MY_BITMAP **save,
+ MY_BITMAP **read_set,
+ MY_BITMAP **write_set)
{
#ifdef DBUG_ASSERT_EXISTS
- save[0]= read_set->bitmap;
- save[1]= write_set->bitmap;
+ save[0]= *read_set;
+ save[1]= *write_set;
(void) tmp_use_all_columns(table, read_set);
(void) tmp_use_all_columns(table, write_set);
#endif
}
-static inline void dbug_tmp_restore_column_maps(MY_BITMAP *read_set,
- MY_BITMAP *write_set,
- my_bitmap_map **old)
+static inline void dbug_tmp_restore_column_maps(MY_BITMAP **read_set,
+ MY_BITMAP **write_set,
+ MY_BITMAP **old)
{
#ifdef DBUG_ASSERT_EXISTS
tmp_restore_column_map(read_set, old[0]);
@@ -3241,6 +3245,12 @@ inline void mark_as_null_row(TABLE *table)
bfill(table->null_flags,table->s->null_bytes,255);
}
+inline void unmark_as_null_row(TABLE *table)
+{
+ table->null_row=0;
+ table->status= STATUS_NO_RECORD;
+}
+
bool is_simple_order(ORDER *order);
class Open_tables_backup;
diff --git a/sql/temporary_tables.cc b/sql/temporary_tables.cc
index f2b27b7056b..5e4b5f7a713 100644
--- a/sql/temporary_tables.cc
+++ b/sql/temporary_tables.cc
@@ -1,5 +1,5 @@
/*
- Copyright (c) 2016, 2019, MariaDB Corporation.
+ Copyright (c) 2016, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -875,10 +875,12 @@ void THD::restore_tmp_table_share(TMP_TABLE_SHARE *share)
inline bool THD::has_temporary_tables()
{
DBUG_ENTER("THD::has_temporary_tables");
- bool result= (rgi_slave
- ? (rgi_slave->rli->save_temporary_tables &&
- !rgi_slave->rli->save_temporary_tables->is_empty())
- : (has_thd_temporary_tables()));
+ bool result=
+#ifdef HAVE_REPLICATION
+ rgi_slave ? (rgi_slave->rli->save_temporary_tables &&
+ !rgi_slave->rli->save_temporary_tables->is_empty()) :
+#endif
+ has_thd_temporary_tables();
DBUG_RETURN(result);
}
@@ -1508,12 +1510,14 @@ bool THD::lock_temporary_tables()
DBUG_RETURN(false);
}
+#ifdef HAVE_REPLICATION
if (rgi_slave)
{
mysql_mutex_lock(&rgi_slave->rli->data_lock);
temporary_tables= rgi_slave->rli->save_temporary_tables;
m_tmp_tables_locked= true;
}
+#endif
DBUG_RETURN(m_tmp_tables_locked);
}
@@ -1534,6 +1538,7 @@ void THD::unlock_temporary_tables()
DBUG_VOID_RETURN;
}
+#ifdef HAVE_REPLICATION
if (rgi_slave)
{
rgi_slave->rli->save_temporary_tables= temporary_tables;
@@ -1541,6 +1546,7 @@ void THD::unlock_temporary_tables()
mysql_mutex_unlock(&rgi_slave->rli->data_lock);
m_tmp_tables_locked= false;
}
+#endif
DBUG_VOID_RETURN;
}
diff --git a/sql/unireg.cc b/sql/unireg.cc
index 17222efe791..8340901175a 100644
--- a/sql/unireg.cc
+++ b/sql/unireg.cc
@@ -57,13 +57,7 @@ static bool make_empty_rec(THD *, uchar *, uint, List<Create_field> &, uint,
*/
static uchar *extra2_write_len(uchar *pos, size_t len)
{
- /* TODO: should be
- if (len > 0 && len <= 255)
- *pos++= (uchar)len;
- ...
- because extra2_read_len() uses 0 for 2-byte lengths.
- extra2_str_size() must be fixed too.
- */
+ DBUG_ASSERT(len);
if (len <= 255)
*pos++= (uchar)len;
else
@@ -1028,6 +1022,8 @@ static bool make_empty_rec(THD *thd, uchar *buff, uint table_options,
TABLE table;
TABLE_SHARE share;
Create_field *field;
+ Check_level_instant_set old_count_cuted_fields(thd, CHECK_FIELD_WARN);
+ Abort_on_warning_instant_set old_abort_on_warning(thd, 0);
DBUG_ENTER("make_empty_rec");
/* We need a table to generate columns for default values */
@@ -1046,7 +1042,6 @@ static bool make_empty_rec(THD *thd, uchar *buff, uint table_options,
null_pos= buff;
List_iterator<Create_field> it(create_fields);
- Check_level_instant_set check_level_save(thd, CHECK_FIELD_WARN);
while ((field=it++))
{
Record_addr addr(buff + field->offset + data_offset,
diff --git a/sql/upgrade_conf_file.cc b/sql/upgrade_conf_file.cc
index 4e167f0263f..b40cce0bbd7 100644
--- a/sql/upgrade_conf_file.cc
+++ b/sql/upgrade_conf_file.cc
@@ -75,6 +75,7 @@ static const char *removed_variables[] =
"innodb_ibuf_accel_rate",
"innodb_ibuf_active_contract",
"innodb_ibuf_max_size",
+"innodb_idle_flush_pct",
"innodb_import_table_from_xtrabackup",
"innodb_instrument_semaphores",
"innodb_kill_idle_transaction",
diff --git a/sql/win_tzname_data.h b/sql/win_tzname_data.h
index 03197227f8e..792cdbc7a13 100644
--- a/sql/win_tzname_data.h
+++ b/sql/win_tzname_data.h
@@ -12,6 +12,7 @@
{L"US Mountain Standard Time","America/Phoenix"},
{L"Mountain Standard Time (Mexico)","America/Chihuahua"},
{L"Mountain Standard Time","America/Denver"},
+{L"Yukon Standard Time","America/Whitehorse"},
{L"Central America Standard Time","America/Guatemala"},
{L"Central Standard Time","America/Chicago"},
{L"Easter Island Standard Time","Pacific/Easter"},
diff --git a/sql/wsrep_check_opts.cc b/sql/wsrep_check_opts.cc
index 935bacffffc..e5a0dcb2ede 100644
--- a/sql/wsrep_check_opts.cc
+++ b/sql/wsrep_check_opts.cc
@@ -63,7 +63,7 @@ int wsrep_check_opts()
else
{
// non-mysqldump SST requires wsrep_cluster_address on startup
- if (!wsrep_cluster_address || !wsrep_cluster_address[0])
+ if (!wsrep_cluster_address_exists())
{
WSREP_ERROR ("%s SST method requires wsrep_cluster_address to be "
"configured on startup.", wsrep_sst_method);
diff --git a/sql/wsrep_client_service.cc b/sql/wsrep_client_service.cc
index 934a9701b41..89621619a23 100644
--- a/sql/wsrep_client_service.cc
+++ b/sql/wsrep_client_service.cc
@@ -69,20 +69,13 @@ bool Wsrep_client_service::interrupted(
wsrep::unique_lock<wsrep::mutex>& lock WSREP_UNUSED) const
{
DBUG_ASSERT(m_thd == current_thd);
- /* Underlying mutex in lock object points to LOCK_thd_data, which
- protects m_thd->wsrep_trx(), LOCK_thd_kill protects m_thd->killed.
- Locking order is:
- 1) LOCK_thd_data
- 2) LOCK_thd_kill */
- mysql_mutex_assert_owner(static_cast<mysql_mutex_t*>(lock.mutex().native()));
- mysql_mutex_lock(&m_thd->LOCK_thd_kill);
+ mysql_mutex_assert_owner(static_cast<mysql_mutex_t*>(lock.mutex()->native()));
bool ret= (m_thd->killed != NOT_KILLED);
if (ret)
{
WSREP_DEBUG("wsrep state is interrupted, THD::killed %d trx state %d",
m_thd->killed, m_thd->wsrep_trx().state());
}
- mysql_mutex_unlock(&m_thd->LOCK_thd_kill);
return ret;
}
diff --git a/sql/wsrep_condition_variable.h b/sql/wsrep_condition_variable.h
index 4412154e67b..6ad53a3086c 100644
--- a/sql/wsrep_condition_variable.h
+++ b/sql/wsrep_condition_variable.h
@@ -44,7 +44,7 @@ public:
void wait(wsrep::unique_lock<wsrep::mutex>& lock)
{
- mysql_mutex_t* mutex= static_cast<mysql_mutex_t*>(lock.mutex().native());
+ mysql_mutex_t* mutex= static_cast<mysql_mutex_t*>(lock.mutex()->native());
mysql_cond_wait(&m_cond, mutex);
}
private:
diff --git a/sql/wsrep_dummy.cc b/sql/wsrep_dummy.cc
index c4dbd8c450f..129df8e1577 100644
--- a/sql/wsrep_dummy.cc
+++ b/sql/wsrep_dummy.cc
@@ -1,4 +1,4 @@
-/* Copyright (C) 2014, 2020, MariaDB
+/* Copyright (C) 2014, 2021, MariaDB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -59,6 +59,12 @@ void wsrep_thd_LOCK(const THD *)
void wsrep_thd_UNLOCK(const THD *)
{ }
+void wsrep_thd_kill_LOCK(const THD *)
+{ }
+
+void wsrep_thd_kill_UNLOCK(const THD *)
+{ }
+
const char *wsrep_thd_conflict_state_str(THD *)
{ return 0; }
@@ -101,14 +107,6 @@ const char* wsrep_thd_client_state_str(const THD*)
const char* wsrep_thd_client_mode_str(const THD*)
{ return 0; }
-void wsrep_thd_auto_increment_variables(THD *thd,
- unsigned long long *offset,
- unsigned long long *increment)
-{
- *offset= thd->variables.auto_increment_offset;
- *increment= thd->variables.auto_increment_increment;
-}
-
const char* wsrep_thd_transaction_state_str(const THD*)
{ return 0; }
diff --git a/sql/wsrep_high_priority_service.cc b/sql/wsrep_high_priority_service.cc
index 1adbb312ac0..0da71c3eda5 100644
--- a/sql/wsrep_high_priority_service.cc
+++ b/sql/wsrep_high_priority_service.cc
@@ -673,6 +673,17 @@ int Wsrep_replayer_service::apply_write_set(const wsrep::ws_meta& ws_meta,
DBUG_ASSERT(thd->wsrep_trx().active());
DBUG_ASSERT(thd->wsrep_trx().state() == wsrep::transaction::s_replaying);
+ /* Allow tests to block the replayer thread using the DBUG facilities */
+ DBUG_EXECUTE_IF("sync.wsrep_replay_cb",
+ {
+ const char act[]=
+ "now "
+ "SIGNAL sync.wsrep_replay_cb_reached "
+ "WAIT_FOR signal.wsrep_replay_cb";
+ DBUG_ASSERT(!debug_sync_set_action(thd,
+ STRING_WITH_LEN(act)));
+ };);
+
wsrep_setup_uk_and_fk_checks(thd);
int ret= 0;
diff --git a/sql/wsrep_mysqld.cc b/sql/wsrep_mysqld.cc
index 7732dc0eefe..0f0ef95492b 100644
--- a/sql/wsrep_mysqld.cc
+++ b/sql/wsrep_mysqld.cc
@@ -46,6 +46,7 @@
#include <cstdlib>
#include <string>
#include "log_event.h"
+#include "sql_connect.h"
#include <sstream>
@@ -131,6 +132,18 @@ uint wsrep_ignore_apply_errors= 0;
*/
/*
+ * Cached variables
+ */
+
+// Whether the Galera write-set replication provider is set
+// wsrep_provider && strcmp(wsrep_provider, WSREP_NONE)
+bool WSREP_PROVIDER_EXISTS_;
+
+// Whether the Galera write-set replication is enabled
+// global_system_variables.wsrep_on && WSREP_PROVIDER_EXISTS_
+bool WSREP_ON_;
+
+/*
* Other wsrep global variables.
*/
@@ -315,29 +328,31 @@ wsp::node_status local_status;
*/
Wsrep_schema *wsrep_schema= 0;
-static void wsrep_log_cb(wsrep::log::level level, const char *msg)
+static void wsrep_log_cb(wsrep::log::level level,
+ const char*, const char *msg)
{
/*
Silence all wsrep related logging from lib and provider if
wsrep is not enabled.
*/
- if (WSREP_ON)
- {
- switch (level) {
- case wsrep::log::info:
- sql_print_information("WSREP: %s", msg);
- break;
- case wsrep::log::warning:
- sql_print_warning("WSREP: %s", msg);
- break;
- case wsrep::log::error:
- sql_print_error("WSREP: %s", msg);
+ if (!WSREP_ON) return;
+
+ switch (level) {
+ case wsrep::log::info:
+ WSREP_INFO("%s", msg);
+ break;
+ case wsrep::log::warning:
+ WSREP_WARN("%s", msg);
+ break;
+ case wsrep::log::error:
+ WSREP_ERROR("%s", msg);
+ break;
+ case wsrep::log::debug:
+ WSREP_DEBUG("%s", msg);
+ break;
+ case wsrep::log::unknown:
+ WSREP_UNKNOWN("%s", msg);
break;
- case wsrep::log::debug:
- if (wsrep_debug) sql_print_information ("[Debug] WSREP: %s", msg);
- default:
- break;
- }
}
}
@@ -875,13 +890,13 @@ void wsrep_init_startup (bool sst_first)
if (!strcmp(wsrep_provider, WSREP_NONE)) return;
/* Skip replication start if no cluster address */
- if (!wsrep_cluster_address || wsrep_cluster_address[0] == 0) return;
+ if (!wsrep_cluster_address_exists()) return;
/*
Read value of wsrep_new_cluster before wsrep_start_replication(),
the value is reset to FALSE inside wsrep_start_replication.
*/
- if (!wsrep_start_replication()) unireg_abort(1);
+ if (!wsrep_start_replication(wsrep_cluster_address)) unireg_abort(1);
wsrep_create_rollbacker();
wsrep_create_appliers(1);
@@ -1031,7 +1046,7 @@ void wsrep_shutdown_replication()
my_pthread_setspecific_ptr(THR_THD, NULL);
}
-bool wsrep_start_replication()
+bool wsrep_start_replication(const char *wsrep_cluster_address)
{
int rcode;
WSREP_DEBUG("wsrep_start_replication");
@@ -1046,12 +1061,7 @@ bool wsrep_start_replication()
return true;
}
- if (!wsrep_cluster_address || wsrep_cluster_address[0]== 0)
- {
- // if provider is non-trivial, but no address is specified, wait for address
- WSREP_DEBUG("wsrep_start_replication exit due to empty address");
- return true;
- }
+ DBUG_ASSERT(wsrep_cluster_address[0]);
bool const bootstrap(TRUE == wsrep_new_cluster);
wsrep_new_cluster= FALSE;
@@ -1181,10 +1191,17 @@ void wsrep_keys_free(wsrep_key_arr_t* key_arr)
key_arr->keys_len= 0;
}
-void
+/*!
+ * @param thd    thread handle
+ * @param tables list of tables
+ * @param keys   prepared keys
+ *
+ * @return false if the parent table keys were appended successfully, true on failure.
+*/
+bool
wsrep_append_fk_parent_table(THD* thd, TABLE_LIST* tables, wsrep::key_array* keys)
{
- if (!WSREP(thd) || !WSREP_CLIENT(thd)) return;
+ bool fail= false;
TABLE_LIST *table;
thd->release_transactional_locks();
@@ -1195,6 +1212,8 @@ wsrep_append_fk_parent_table(THD* thd, TABLE_LIST* tables, wsrep::key_array* key
open_tables(thd, &tables, &counter, MYSQL_OPEN_FORCE_SHARED_HIGH_PRIO_MDL))
{
WSREP_DEBUG("unable to open table for FK checks for %s", thd->query());
+ fail= true;
+ goto exit;
}
for (table= tables; table; table= table->next_local)
@@ -1216,14 +1235,44 @@ wsrep_append_fk_parent_table(THD* thd, TABLE_LIST* tables, wsrep::key_array* key
}
}
+exit:
/* close the table and release MDL locks */
close_thread_tables(thd);
thd->mdl_context.rollback_to_savepoint(mdl_savepoint);
for (table= tables; table; table= table->next_local)
{
table->table= NULL;
+ table->next_global= NULL;
table->mdl_request.ticket= NULL;
}
+
+ return fail;
+}
+
+bool wsrep_reload_ssl()
+{
+ try
+ {
+ std::string opts= Wsrep_server_state::instance().provider().options();
+ if (opts.find("socket.ssl_reload") == std::string::npos)
+ {
+ WSREP_DEBUG("Option `socket.ssl_reload` not found in parameters.");
+ return false;
+ }
+ const std::string reload_ssl_param("socket.ssl_reload=1");
+ enum wsrep::provider::status ret= Wsrep_server_state::instance().provider().options(reload_ssl_param);
+ if (ret)
+ {
+ WSREP_ERROR("Set options returned %d", ret);
+ return true;
+ }
+ return false;
+ }
+ catch (...)
+ {
+ WSREP_ERROR("Failed to get provider options");
+ return true;
+ }
}
/*!
@@ -1962,7 +2011,7 @@ static int wsrep_TOI_begin(THD *thd, const char *db, const char *table,
{
DBUG_ASSERT(thd->variables.wsrep_OSU_method == WSREP_OSU_TOI);
- WSREP_DEBUG("TOI Begin");
+ WSREP_DEBUG("TOI Begin for %s", WSREP_QUERY(thd));
if (wsrep_can_run_in_toi(thd, db, table, table_list) == false)
{
WSREP_DEBUG("No TOI for %s", WSREP_QUERY(thd));
@@ -2052,22 +2101,20 @@ static void wsrep_TOI_end(THD *thd) {
wsrep_to_isolation--;
wsrep::client_state& client_state(thd->wsrep_cs());
DBUG_ASSERT(wsrep_thd_is_local_toi(thd));
- WSREP_DEBUG("TO END: %lld: %s", client_state.toi_meta().seqno().get(),
- WSREP_QUERY(thd));
- if (wsrep_thd_is_local_toi(thd))
+ wsrep_set_SE_checkpoint(client_state.toi_meta().gtid());
+
+ int ret= client_state.leave_toi_local(wsrep::mutable_buffer());
+
+ if (!ret)
{
- wsrep_set_SE_checkpoint(client_state.toi_meta().gtid());
- int ret= client_state.leave_toi_local(wsrep::mutable_buffer());
- if (!ret)
- {
- WSREP_DEBUG("TO END: %lld", client_state.toi_meta().seqno().get());
- }
- else
- {
- WSREP_WARN("TO isolation end failed for: %d, schema: %s, sql: %s",
- ret, (thd->db.str ? thd->db.str : "(null)"), WSREP_QUERY(thd));
- }
+ WSREP_DEBUG("TO END: %lld: %s",
+ client_state.toi_meta().seqno().get(), WSREP_QUERY(thd));
+ }
+ else
+ {
+ WSREP_WARN("TO isolation end failed for: %d, sql: %s",
+ ret, WSREP_QUERY(thd));
}
}
@@ -2381,18 +2428,7 @@ static void wsrep_close_thread(THD *thd)
thd->set_killed(KILL_CONNECTION);
MYSQL_CALLBACK(thread_scheduler, post_kill_notification, (thd));
mysql_mutex_lock(&thd->LOCK_thd_kill);
- if (thd->mysys_var)
- {
- thd->mysys_var->abort=1;
- mysql_mutex_lock(&thd->mysys_var->mutex);
- if (thd->mysys_var->current_cond)
- {
- mysql_mutex_lock(thd->mysys_var->current_mutex);
- mysql_cond_broadcast(thd->mysys_var->current_cond);
- mysql_mutex_unlock(thd->mysys_var->current_mutex);
- }
- mysql_mutex_unlock(&thd->mysys_var->mutex);
- }
+ thd->abort_current_cond_wait(true);
mysql_mutex_unlock(&thd->LOCK_thd_kill);
}
@@ -2441,10 +2477,12 @@ static my_bool kill_remaining_threads(THD *thd, THD *caller_thd)
if (is_client_connection(thd) &&
!abort_replicated(thd) &&
!is_replaying_connection(thd) &&
+ thd_is_connection_alive(thd) &&
thd != caller_thd)
{
+
WSREP_INFO("killing local connection: %lld", (longlong) thd->thread_id);
- close_connection(thd, 0);
+ close_connection(thd);
}
#endif
return 0;
diff --git a/sql/wsrep_mysqld.h b/sql/wsrep_mysqld.h
index b0050a2ebae..db6910030c8 100644
--- a/sql/wsrep_mysqld.h
+++ b/sql/wsrep_mysqld.h
@@ -20,6 +20,7 @@
#ifdef WITH_WSREP
extern bool WSREP_ON_;
+extern bool WSREP_PROVIDER_EXISTS_;
#include <mysql/plugin.h>
#include "mysql/service_wsrep.h"
@@ -203,7 +204,7 @@ extern void wsrep_close_applier_threads(int count);
/* new defines */
extern void wsrep_stop_replication(THD *thd);
-extern bool wsrep_start_replication();
+extern bool wsrep_start_replication(const char *wsrep_cluster_address);
extern void wsrep_shutdown_replication();
extern bool wsrep_must_sync_wait (THD* thd, uint mask= WSREP_SYNC_WAIT_BEFORE_READ);
extern bool wsrep_sync_wait (THD* thd, uint mask= WSREP_SYNC_WAIT_BEFORE_READ);
@@ -212,7 +213,8 @@ wsrep_sync_wait_upto (THD* thd, wsrep_gtid_t* upto, int timeout);
extern void wsrep_last_committed_id (wsrep_gtid_t* gtid);
extern int wsrep_check_opts();
extern void wsrep_prepend_PATH (const char* path);
-void wsrep_append_fk_parent_table(THD* thd, TABLE_LIST* table, wsrep::key_array* keys);
+extern bool wsrep_append_fk_parent_table(THD* thd, TABLE_LIST* table, wsrep::key_array* keys);
+extern bool wsrep_reload_ssl();
/* Other global variables */
extern wsrep_seqno_t wsrep_locked_seqno;
@@ -221,7 +223,8 @@ extern wsrep_seqno_t wsrep_locked_seqno;
/* use xxxxxx_NNULL macros when thd pointer is guaranteed to be non-null to
* avoid compiler warnings (GCC 6 and later) */
-#define WSREP_NNULL(thd) (WSREP_ON && thd->variables.wsrep_on)
+#define WSREP_NNULL(thd) \
+ (WSREP_PROVIDER_EXISTS_ && thd->variables.wsrep_on)
#define WSREP(thd) \
(thd && WSREP_NNULL(thd))
@@ -251,34 +254,40 @@ void WSREP_LOG(void (*fun)(const char* fmt, ...), const char* fmt, ...);
#define WSREP_INFO(...) WSREP_LOG(sql_print_information, ##__VA_ARGS__)
#define WSREP_WARN(...) WSREP_LOG(sql_print_warning, ##__VA_ARGS__)
#define WSREP_ERROR(...) WSREP_LOG(sql_print_error, ##__VA_ARGS__)
+#define WSREP_UNKNOWN(fmt, ...) WSREP_ERROR("UNKNOWN: " fmt, ##__VA_ARGS__)
#define WSREP_LOG_CONFLICT_THD(thd, role) \
- WSREP_LOG(sql_print_information, \
- "%s: \n " \
- " THD: %lu, mode: %s, state: %s, conflict: %s, seqno: %lld\n " \
- " SQL: %s", \
- role, \
- thd_get_thread_id(thd), \
- wsrep_thd_client_mode_str(thd), \
- wsrep_thd_client_state_str(thd), \
- wsrep_thd_transaction_state_str(thd), \
- wsrep_thd_trx_seqno(thd), \
- wsrep_thd_query(thd) \
+ WSREP_INFO("%s: \n " \
+ " THD: %lu, mode: %s, state: %s, conflict: %s, seqno: %lld\n " \
+ " SQL: %s", \
+ role, \
+ thd_get_thread_id(thd), \
+ wsrep_thd_client_mode_str(thd), \
+ wsrep_thd_client_state_str(thd), \
+ wsrep_thd_transaction_state_str(thd), \
+ wsrep_thd_trx_seqno(thd), \
+ wsrep_thd_query(thd) \
);
#define WSREP_LOG_CONFLICT(bf_thd, victim_thd, bf_abort) \
if (wsrep_debug || wsrep_log_conflicts) \
{ \
- WSREP_LOG(sql_print_information, "cluster conflict due to %s for threads:", \
- (bf_abort) ? "high priority abort" : "certification failure" \
+ WSREP_INFO("cluster conflict due to %s for threads:", \
+ (bf_abort) ? "high priority abort" : "certification failure" \
); \
if (bf_thd) WSREP_LOG_CONFLICT_THD(bf_thd, "Winning thread"); \
if (victim_thd) WSREP_LOG_CONFLICT_THD(victim_thd, "Victim thread"); \
- WSREP_LOG(sql_print_information, "context: %s:%d", __FILE__, __LINE__); \
+ WSREP_INFO("context: %s:%d", __FILE__, __LINE__); \
}
-#define WSREP_PROVIDER_EXISTS \
- (wsrep_provider && strncasecmp(wsrep_provider, WSREP_NONE, FN_REFLEN))
+#define WSREP_PROVIDER_EXISTS (WSREP_PROVIDER_EXISTS_)
+
+static inline bool wsrep_cluster_address_exists()
+{
+ if (mysqld_server_started)
+ mysql_mutex_assert_owner(&LOCK_global_system_variables);
+ return wsrep_cluster_address && wsrep_cluster_address[0];
+}
#define WSREP_QUERY(thd) (thd->query())
@@ -501,6 +510,7 @@ wsrep::key wsrep_prepare_key_for_toi(const char* db, const char* table,
#define wsrep_thr_deinit() do {} while(0)
#define wsrep_init_globals() do {} while(0)
#define wsrep_create_appliers(X) do {} while(0)
+#define wsrep_cluster_address_exists() (false)
#endif /* WITH_WSREP */
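The WSREP_PROVIDER_EXISTS_ / WSREP_ON_ caching introduced in this header (and recomputed by wsrep_set_wsrep_on() in sql/wsrep_var.cc further down in this diff) follows a simple pattern. The snippet below is a minimal standalone sketch of that pattern, not server code; apart from the two cached flag names, every identifier is illustrative.

// Sketch: recompute both cached booleans in one place whenever the
// underlying settings change, so hot-path macros such as WSREP_NNULL()
// read a plain bool instead of re-parsing wsrep_provider on every call.
#include <strings.h>                       // strcasecmp() (POSIX)

static const char *provider_path= "none";  // stands in for wsrep_provider
static bool global_wsrep_on= false;        // stands in for global wsrep_on

static bool WSREP_PROVIDER_EXISTS_= false; // cached: provider is set
static bool WSREP_ON_= false;              // cached: replication enabled

// Server equivalent: wsrep_set_wsrep_on(); call it from every code path
// that updates provider_path or global_wsrep_on.
static void recompute_cached_flags()
{
  WSREP_PROVIDER_EXISTS_= provider_path &&
                          strcasecmp(provider_path, "none") != 0;
  WSREP_ON_= global_wsrep_on && WSREP_PROVIDER_EXISTS_;
}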
diff --git a/sql/wsrep_notify.cc b/sql/wsrep_notify.cc
index 1d6d13ea6d2..d2d08e92ae7 100644
--- a/sql/wsrep_notify.cc
+++ b/sql/wsrep_notify.cc
@@ -27,10 +27,12 @@ void wsrep_notify_status(enum wsrep::server_state::state status,
return;
}
- char cmd_buf[1 << 16]; // this can be long
- long cmd_len= sizeof(cmd_buf) - 1;
- char* cmd_ptr= cmd_buf;
- long cmd_off= 0;
+ const long cmd_len = (1 << 16) - 1;
+ char* cmd_ptr = (char*) my_malloc(cmd_len + 1, MYF(MY_WME));
+ long cmd_off = 0;
+
+ if (!cmd_ptr)
+ return; // the warning is in the log
cmd_off += snprintf (cmd_ptr + cmd_off, cmd_len - cmd_off, "%s",
wsrep_notify_cmd);
@@ -73,6 +75,7 @@ void wsrep_notify_status(enum wsrep::server_state::state status,
{
WSREP_ERROR("Notification buffer too short (%ld). Aborting notification.",
cmd_len);
+ my_free(cmd_ptr);
return;
}
@@ -86,5 +89,6 @@ void wsrep_notify_status(enum wsrep::server_state::state status,
WSREP_ERROR("Notification command failed: %d (%s): \"%s\"",
err, strerror(err), cmd_ptr);
}
+ my_free(cmd_ptr);
}
diff --git a/sql/wsrep_priv.h b/sql/wsrep_priv.h
index e480331ba65..fb8467adc9d 100644
--- a/sql/wsrep_priv.h
+++ b/sql/wsrep_priv.h
@@ -42,7 +42,7 @@ extern wsrep_seqno_t local_seqno;
extern Wsrep_schema* wsrep_schema;
// a helper function
-void wsrep_sst_received(THD*, const wsrep_uuid_t&, wsrep_seqno_t,
+bool wsrep_sst_received(THD*, const wsrep_uuid_t&, wsrep_seqno_t,
const void*, size_t);
void wsrep_notify_status(enum wsrep::server_state::state status,
diff --git a/sql/wsrep_schema.cc b/sql/wsrep_schema.cc
index 8a47bbec9b0..5ee6468e9c1 100644
--- a/sql/wsrep_schema.cc
+++ b/sql/wsrep_schema.cc
@@ -159,6 +159,24 @@ private:
THD *m_cur_thd;
};
+class sql_safe_updates
+{
+public:
+ sql_safe_updates(THD* thd)
+ : m_thd(thd)
+ , m_option_bits(thd->variables.option_bits)
+ {
+ thd->variables.option_bits&= ~OPTION_SAFE_UPDATES;
+ }
+ ~sql_safe_updates()
+ {
+ m_thd->variables.option_bits= m_option_bits;
+ }
+private:
+ THD* m_thd;
+ ulonglong m_option_bits;
+};
+
static int execute_SQL(THD* thd, const char* sql, uint length) {
DBUG_ENTER("Wsrep_schema::execute_SQL()");
int err= 0;
@@ -230,6 +248,7 @@ static int open_table(THD* thd,
tables.init_one_table(schema_name,
table_name,
NULL, lock_type);
+ thd->lex->query_tables_own_last= 0;
if (!open_n_lock_single_table(thd, &tables, tables.lock_type, flags)) {
if (thd->is_error()) {
@@ -565,14 +584,24 @@ static int end_index_scan(TABLE* table) {
return 0;
}
-static void make_key(TABLE* table, uchar* key, key_part_map* map, int parts) {
+static void make_key(TABLE* table, uchar** key, key_part_map* map, int parts) {
uint prefix_length= 0;
KEY_PART_INFO* key_part= table->key_info->key_part;
+
for (int i=0; i < parts; i++)
prefix_length += key_part[i].store_length;
+
*map= make_prev_keypart_map(parts);
- key_copy(key, table->record[0], table->key_info, prefix_length);
+
+ if (!(*key= (uchar *) my_malloc(prefix_length + 1, MYF(MY_WME))))
+ {
+ WSREP_ERROR("Failed to allocate memory for key prefix_length %u", prefix_length);
+ assert(0);
+ }
+
+ key_copy(*key, table->record[0], table->key_info, prefix_length);
}
+
} /* namespace Wsrep_schema_impl */
@@ -592,13 +621,15 @@ static void wsrep_init_thd_for_schema(THD *thd)
thd->prior_thr_create_utime= thd->start_utime= thd->thr_create_utime;
- /* */
- thd->variables.wsrep_on = 0;
+ /* No Galera replication */
+ thd->variables.wsrep_on= 0;
/* No binlogging */
- thd->variables.sql_log_bin = 0;
- thd->variables.option_bits &= ~OPTION_BIN_LOG;
+ thd->variables.sql_log_bin= 0;
+ thd->variables.option_bits&= ~OPTION_BIN_LOG;
+ /* No safe updates */
+ thd->variables.option_bits&= ~OPTION_SAFE_UPDATES;
/* No general log */
- thd->variables.option_bits |= OPTION_LOG_OFF;
+ thd->variables.option_bits|= OPTION_LOG_OFF;
/* Read committed isolation to avoid gap locking */
thd->variables.tx_isolation= ISO_READ_COMMITTED;
wsrep_assign_from_threadvars(thd);
@@ -653,6 +684,7 @@ int Wsrep_schema::store_view(THD* thd, const Wsrep_view& view)
Wsrep_schema_impl::wsrep_off wsrep_off(thd);
Wsrep_schema_impl::binlog_off binlog_off(thd);
+ Wsrep_schema_impl::sql_safe_updates sql_safe_updates(thd);
/*
Clean up cluster table and members table.
@@ -899,13 +931,22 @@ int Wsrep_schema::append_fragment(THD* thd,
thd->thread_id,
os.str().c_str(),
transaction_id.get());
+ /* Use a private query table list for the duration of fragment storing;
+ the query table list populated by the "parent DML" may cause problems,
+ e.g. for virtual column handling.
+ */
+ Query_tables_list query_tables_list_backup;
+ thd->lex->reset_n_backup_query_tables_list(&query_tables_list_backup);
+
Wsrep_schema_impl::binlog_off binlog_off(thd);
+ Wsrep_schema_impl::sql_safe_updates sql_safe_updates(thd);
Wsrep_schema_impl::init_stmt(thd);
TABLE* frag_table= 0;
if (Wsrep_schema_impl::open_for_write(thd, sr_table_str.c_str(), &frag_table))
{
trans_rollback_stmt(thd);
+ thd->lex->restore_backup_query_tables_list(&query_tables_list_backup);
DBUG_RETURN(1);
}
@@ -919,9 +960,11 @@ int Wsrep_schema::append_fragment(THD* thd,
if ((error= Wsrep_schema_impl::insert(frag_table))) {
WSREP_ERROR("Failed to write to frag table: %d", error);
trans_rollback_stmt(thd);
+ thd->lex->restore_backup_query_tables_list(&query_tables_list_backup);
DBUG_RETURN(1);
}
Wsrep_schema_impl::finish_stmt(thd);
+ thd->lex->restore_backup_query_tables_list(&query_tables_list_backup);
DBUG_RETURN(0);
}
@@ -938,15 +981,24 @@ int Wsrep_schema::update_fragment_meta(THD* thd,
ws_meta.seqno().get());
DBUG_ASSERT(ws_meta.seqno().is_undefined() == false);
+ /* Use a private query table list for the duration of fragment storing;
+ the query table list populated by the "parent DML" may cause problems,
+ e.g. for virtual column handling.
+ */
+ Query_tables_list query_tables_list_backup;
+ thd->lex->reset_n_backup_query_tables_list(&query_tables_list_backup);
+
Wsrep_schema_impl::binlog_off binlog_off(thd);
+ Wsrep_schema_impl::sql_safe_updates sql_safe_updates(thd);
int error;
- uchar key[MAX_KEY_LENGTH+MAX_FIELD_WIDTH];
+ uchar *key=NULL;
key_part_map key_map= 0;
TABLE* frag_table= 0;
Wsrep_schema_impl::init_stmt(thd);
if (Wsrep_schema_impl::open_for_write(thd, sr_table_str.c_str(), &frag_table))
{
+ thd->lex->restore_backup_query_tables_list(&query_tables_list_backup);
DBUG_RETURN(1);
}
@@ -954,7 +1006,7 @@ int Wsrep_schema::update_fragment_meta(THD* thd,
Wsrep_schema_impl::store(frag_table, 0, ws_meta.server_id());
Wsrep_schema_impl::store(frag_table, 1, ws_meta.transaction_id().get());
Wsrep_schema_impl::store(frag_table, 2, -1);
- Wsrep_schema_impl::make_key(frag_table, key, &key_map, 3);
+ Wsrep_schema_impl::make_key(frag_table, &key, &key_map, 3);
if ((error= Wsrep_schema_impl::init_for_index_scan(frag_table,
key, key_map)))
@@ -967,9 +1019,12 @@ int Wsrep_schema::update_fragment_meta(THD* thd,
error);
}
Wsrep_schema_impl::finish_stmt(thd);
+ thd->lex->restore_backup_query_tables_list(&query_tables_list_backup);
+ my_free(key);
DBUG_RETURN(1);
}
+ my_free(key);
/* Copy the original record to frag_table->record[1] */
store_record(frag_table, record[1]);
@@ -982,11 +1037,13 @@ int Wsrep_schema::update_fragment_meta(THD* thd,
frag_table->s->table_name.str,
error);
Wsrep_schema_impl::finish_stmt(thd);
+ thd->lex->restore_backup_query_tables_list(&query_tables_list_backup);
DBUG_RETURN(1);
}
int ret= Wsrep_schema_impl::end_index_scan(frag_table);
Wsrep_schema_impl::finish_stmt(thd);
+ thd->lex->restore_backup_query_tables_list(&query_tables_list_backup);
DBUG_RETURN(ret);
}
@@ -1002,7 +1059,7 @@ static int remove_fragment(THD* thd,
seqno.get());
int ret= 0;
int error;
- uchar key[MAX_KEY_LENGTH+MAX_FIELD_WIDTH];
+ uchar *key= NULL;
key_part_map key_map= 0;
DBUG_ASSERT(server_id.is_undefined() == false);
@@ -1016,7 +1073,7 @@ static int remove_fragment(THD* thd,
Wsrep_schema_impl::store(frag_table, 0, server_id);
Wsrep_schema_impl::store(frag_table, 1, transaction_id.get());
Wsrep_schema_impl::store(frag_table, 2, seqno.get());
- Wsrep_schema_impl::make_key(frag_table, key, &key_map, 3);
+ Wsrep_schema_impl::make_key(frag_table, &key, &key_map, 3);
if ((error= Wsrep_schema_impl::init_for_index_scan(frag_table,
key,
@@ -1038,6 +1095,8 @@ static int remove_fragment(THD* thd,
ret= 1;
}
+ if (key)
+ my_free(key);
Wsrep_schema_impl::end_index_scan(frag_table);
return ret;
}
@@ -1053,6 +1112,7 @@ int Wsrep_schema::remove_fragments(THD* thd,
WSREP_DEBUG("Removing %zu fragments", fragments.size());
Wsrep_schema_impl::wsrep_off wsrep_off(thd);
Wsrep_schema_impl::binlog_off binlog_off(thd);
+ Wsrep_schema_impl::sql_safe_updates sql_safe_updates(thd);
Query_tables_list query_tables_list_backup;
Open_tables_backup open_tables_backup;
@@ -1120,12 +1180,13 @@ int Wsrep_schema::replay_transaction(THD* orig_thd,
Wsrep_schema_impl::wsrep_off wsrep_off(&thd);
Wsrep_schema_impl::binlog_off binlog_off(&thd);
+ Wsrep_schema_impl::sql_safe_updates sql_safe_updates(&thd);
Wsrep_schema_impl::thd_context_switch thd_context_switch(orig_thd, &thd);
int ret= 1;
int error;
TABLE* frag_table= 0;
- uchar key[MAX_KEY_LENGTH+MAX_FIELD_WIDTH];
+ uchar *key=NULL;
key_part_map key_map= 0;
for (std::vector<wsrep::seqno>::const_iterator i= fragments.begin();
@@ -1142,7 +1203,7 @@ int Wsrep_schema::replay_transaction(THD* orig_thd,
Wsrep_schema_impl::store(frag_table, 0, ws_meta.server_id());
Wsrep_schema_impl::store(frag_table, 1, ws_meta.transaction_id().get());
Wsrep_schema_impl::store(frag_table, 2, i->get());
- Wsrep_schema_impl::make_key(frag_table, key, &key_map, 3);
+ Wsrep_schema_impl::make_key(frag_table, &key, &key_map, 3);
int error= Wsrep_schema_impl::init_for_index_scan(frag_table,
key,
@@ -1189,6 +1250,7 @@ int Wsrep_schema::replay_transaction(THD* orig_thd,
Wsrep_schema_impl::finish_stmt(&thd);
DBUG_RETURN(1);
}
+
error= Wsrep_schema_impl::init_for_index_scan(frag_table,
key,
key_map);
@@ -1202,6 +1264,7 @@ int Wsrep_schema::replay_transaction(THD* orig_thd,
}
error= Wsrep_schema_impl::delete_row(frag_table);
+
if (error)
{
WSREP_WARN("Could not delete row from streaming log table: %d", error);
@@ -1211,8 +1274,12 @@ int Wsrep_schema::replay_transaction(THD* orig_thd,
}
Wsrep_schema_impl::end_index_scan(frag_table);
Wsrep_schema_impl::finish_stmt(&thd);
+ my_free(key);
+ key= NULL;
}
+ if (key)
+ my_free(key);
DBUG_RETURN(ret);
}
@@ -1228,6 +1295,7 @@ int Wsrep_schema::recover_sr_transactions(THD *orig_thd)
Wsrep_storage_service storage_service(&storage_thd);
Wsrep_schema_impl::binlog_off binlog_off(&storage_thd);
Wsrep_schema_impl::wsrep_off wsrep_off(&storage_thd);
+ Wsrep_schema_impl::sql_safe_updates sql_safe_updates(&storage_thd);
Wsrep_schema_impl::thd_context_switch thd_context_switch(orig_thd,
&storage_thd);
Wsrep_server_state& server_state(Wsrep_server_state::instance());
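The new Wsrep_schema_impl::sql_safe_updates guard above uses the same save/clear/restore idiom as the existing binlog_off and wsrep_off helpers. The toy sketch below (every type and name is illustrative; this is not server code) shows why the RAII form suits this file: the internal statements on the streaming-log table return early on several error paths, and the destructor restores the saved option bits on each of them without any explicit cleanup.

#include <cstdint>

struct toy_session { uint64_t option_bits; };        // stands in for THD::variables
static const uint64_t TOY_OPTION_SAFE_UPDATES= 1ULL << 0;

class toy_safe_updates_off
{
public:
  explicit toy_safe_updates_off(toy_session *s)
    : m_s(s), m_saved(s->option_bits)
  { s->option_bits&= ~TOY_OPTION_SAFE_UPDATES; }         // clear for this scope
  ~toy_safe_updates_off() { m_s->option_bits= m_saved; } // restore on any exit
private:
  toy_session *m_s;
  uint64_t m_saved;
};

static int toy_update_fragment(toy_session *s, bool open_failed, bool scan_failed)
{
  toy_safe_updates_off guard(s);
  if (open_failed) return 1;   // option bits restored here
  if (scan_failed) return 1;   // ... and here
  /* ... internal UPDATE/DELETE on the streaming-log table ... */
  return 0;                    // ... and on the normal path
}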
diff --git a/sql/wsrep_server_service.cc b/sql/wsrep_server_service.cc
index da021d4a7eb..19259a43925 100644
--- a/sql/wsrep_server_service.cc
+++ b/sql/wsrep_server_service.cc
@@ -40,6 +40,7 @@ static void init_service_thd(THD* thd, char* thread_stack)
thd->prior_thr_create_utime= thd->start_utime= microsecond_interval_timer();
thd->set_command(COM_SLEEP);
thd->reset_for_next_command(true);
+ server_threads.insert(thd); // as wsrep_innobase_kill_one_trx() uses find_thread_by_id()
}
Wsrep_storage_service*
@@ -79,6 +80,7 @@ void Wsrep_server_service::release_storage_service(
static_cast<Wsrep_storage_service*>(storage_service);
THD* thd= ss->m_thd;
wsrep_reset_threadvars(thd);
+ server_threads.erase(thd);
delete ss;
delete thd;
}
@@ -92,7 +94,8 @@ wsrep_create_streaming_applier(THD *orig_thd, const char *ctx)
streaming transaction is BF aborted and streaming applier
is created from BF aborter context. */
Wsrep_threadvars saved_threadvars(wsrep_save_threadvars());
- wsrep_reset_threadvars(saved_threadvars.cur_thd);
+ if (saved_threadvars.cur_thd)
+ wsrep_reset_threadvars(saved_threadvars.cur_thd);
THD *thd= 0;
Wsrep_applier_service *ret= 0;
if (!wsrep_create_threadvars() &&
@@ -109,7 +112,8 @@ wsrep_create_streaming_applier(THD *orig_thd, const char *ctx)
}
/* Restore original thread local storage state before returning. */
wsrep_restore_threadvars(saved_threadvars);
- wsrep_store_threadvars(saved_threadvars.cur_thd);
+ if (saved_threadvars.cur_thd)
+ wsrep_store_threadvars(saved_threadvars.cur_thd);
return ret;
}
@@ -138,6 +142,7 @@ void Wsrep_server_service::release_high_priority_service(wsrep::high_priority_se
THD* thd= hps->m_thd;
delete hps;
wsrep_store_threadvars(thd);
+ server_threads.erase(thd);
delete thd;
wsrep_delete_threadvars();
}
@@ -162,16 +167,19 @@ void Wsrep_server_service::log_message(enum wsrep::log::level level,
switch (level)
{
case wsrep::log::debug:
- sql_print_information("debug: %s", message);
+ WSREP_DEBUG("%s", message);
break;
case wsrep::log::info:
- sql_print_information("%s", message);
+ WSREP_INFO("%s", message);
break;
case wsrep::log::warning:
- sql_print_warning("%s", message);
+ WSREP_WARN("%s", message);
break;
case wsrep::log::error:
- sql_print_error("%s", message);
+ WSREP_ERROR("%s", message);
+ break;
+ case wsrep::log::unknown:
+ WSREP_UNKNOWN("%s", message);
break;
}
}
diff --git a/sql/wsrep_sst.cc b/sql/wsrep_sst.cc
index c024f08dd22..09b388f0868 100644
--- a/sql/wsrep_sst.cc
+++ b/sql/wsrep_sst.cc
@@ -265,6 +265,12 @@ static bool sst_auth_real_set (const char* value)
if (wsrep_sst_auth) { my_free((void*) wsrep_sst_auth); }
wsrep_sst_auth= my_strdup(WSREP_SST_AUTH_MASK, MYF(0));
}
+ else
+ {
+ if (wsrep_sst_auth) { my_free((void*) wsrep_sst_auth); }
+ wsrep_sst_auth= NULL;
+ }
+
return 0;
}
return 1;
@@ -308,12 +314,40 @@ bool wsrep_before_SE()
}
// Signal end of SST
-static void wsrep_sst_complete (THD* thd,
- int const rcode)
+static bool wsrep_sst_complete (THD* thd,
+ int const rcode,
+ wsrep::gtid const sst_gtid)
{
Wsrep_client_service client_service(thd, thd->wsrep_cs());
- Wsrep_server_state::instance().sst_received(client_service, rcode);
+ Wsrep_server_state& server_state= Wsrep_server_state::instance();
+ enum wsrep::server_state::state state= server_state.state();
+ bool failed= false;
+ char start_pos_buf[FN_REFLEN];
+ ssize_t len= wsrep::print_to_c_str(sst_gtid, start_pos_buf, FN_REFLEN-1);
+ start_pos_buf[len]='\0';
+
+ // Do not call sst_received() unless the server is in the joiner or
+ // initialized state, because sst_received() assumes one of those
+ // states. Report an error if the server is in any other state.
+ if ((state == Wsrep_server_state::s_joiner ||
+ state == Wsrep_server_state::s_initialized))
+ {
+ Wsrep_server_state::instance().sst_received(client_service,
+ rcode);
+ WSREP_INFO("SST succeeded for position %s", start_pos_buf);
+ }
+ else
+ {
+ WSREP_ERROR("SST failed for position %s initialized %d server_state %s",
+ start_pos_buf,
+ server_state.is_initialized(),
+ wsrep::to_c_string(state));
+ failed= true;
+ }
+
wsrep_joiner_monitor_end();
+ return failed;
}
/*
@@ -325,13 +359,15 @@ static void wsrep_sst_complete (THD* thd,
@param seqno [IN] Initial state sequence number
@param state [IN] Always NULL, also ignored by wsrep provider (?)
@param state_len [IN] Always 0, also ignored by wsrep provider (?)
+ @return false on success, true on error
*/
-void wsrep_sst_received (THD* thd,
+bool wsrep_sst_received (THD* thd,
const wsrep_uuid_t& uuid,
wsrep_seqno_t const seqno,
const void* const state,
size_t const state_len)
{
+ bool error= false;
/*
To keep track of whether the local uuid:seqno should be updated. Also, note
that local state (uuid:seqno) is updated/checkpointed only after we get an
@@ -371,8 +407,10 @@ void wsrep_sst_received (THD* thd,
if (WSREP_ON)
{
int const rcode(seqno < 0 ? seqno : 0);
- wsrep_sst_complete(thd,rcode);
+ error= wsrep_sst_complete(thd,rcode, sst_gtid);
}
+
+ return error;
}
static int sst_scan_uuid_seqno (const char* str,
@@ -653,7 +691,7 @@ err:
/* Read committed isolation to avoid gap locking */
thd->variables.tx_isolation= ISO_READ_COMMITTED;
- wsrep_sst_complete (thd, -err);
+ wsrep_sst_complete (thd, -err, ret_gtid);
delete thd;
my_thread_end();
@@ -732,8 +770,20 @@ static size_t estimate_cmd_len (bool* extra_args)
char c;
while ((c = *arg++) != 0)
{
- /* A whitespace or a single quote requires double quotation marks: */
- if (isspace(c) || c == '\'')
+ /*
+ Space, single quote, ampersand, pipe, and I/O redirection characters
+ require text to be enclosed in double quotes:
+ */
+ if (isspace(c) || c == '\'' || c == '&' || c == '|' ||
+#ifdef __WIN__
+ c == '>' || c == '<')
+#else
+ /*
+ The semicolon is used to separate shell commands, so it must be
+ enclosed in double quotes as well:
+ */
+ c == '>' || c == '<' || c == ';')
+#endif
{
quotation= true;
}
@@ -756,10 +806,19 @@ static size_t estimate_cmd_len (bool* extra_args)
while ((c = *arg++) != 0)
{
/*
- A whitespace or a single quote requires double
- quotation marks:
+ Space, single quote, ampersand, pipe, and I/O redirection characters
+ require text to be enclosed in double quotes:
*/
- if (isspace(c) || c == '\'')
+ if (isspace(c) || c == '\'' || c == '&' || c == '|' ||
+#ifdef __WIN__
+ c == '>' || c == '<')
+#else
+ /*
+ The semicolon is used to separate shell commands, so it must be
+ enclosed in double quotes as well:
+ */
+ c == '>' || c == '<' || c == ';')
+#endif
{
quotation= true;
}
@@ -840,8 +899,20 @@ static void copy_orig_argv (char* cmd_str)
char c;
while ((c = *arg_scan++) != 0)
{
- /* A whitespace or a single quote requires double quotation marks: */
- if (isspace(c) || c == '\'')
+ /*
+ Space, single quote, ampersand, pipe, and I/O redirection characters
+ require text to be enclosed in double quotes:
+ */
+ if (isspace(c) || c == '\'' || c == '&' || c == '|' ||
+#ifdef __WIN__
+ c == '>' || c == '<')
+#else
+ /*
+ The semicolon is used to separate shell commands, so it must be
+ enclosed in double quotes as well:
+ */
+ c == '>' || c == '<' || c == ';')
+#endif
{
quotation= true;
}
@@ -915,10 +986,19 @@ static void copy_orig_argv (char* cmd_str)
while ((c = *arg_scan++) != 0)
{
/*
- A whitespace or a single quote requires double
- quotation marks:
+ Space, single quote, ampersand, pipe, and I/O redirection characters
+ require text to be enclosed in double quotes:
*/
- if (isspace(c) || c == '\'')
+ if (isspace(c) || c == '\'' || c == '&' || c == '|' ||
+#ifdef __WIN__
+ c == '>' || c == '<')
+#else
+ /*
+ The semicolon is used to separate shell commands, so it must be
+ enclosed in double quotes as well:
+ */
+ c == '>' || c == '<' || c == ';')
+#endif
{
quotation= true;
}
@@ -1170,6 +1250,19 @@ static ssize_t sst_prepare_mysqldump (const char* addr_in,
*addr_out= addr_in;
}
+ pthread_t monitor;
+ ret = mysql_thread_create (key_wsrep_sst_joiner_monitor, &monitor, NULL, wsrep_sst_joiner_monitor_thread, NULL);
+
+ if (ret)
+ {
+ WSREP_ERROR("sst_prepare_other(): mysql_thread_create() failed: %d (%s)",
+ ret, strerror(ret));
+ return -ret;
+ }
+
+ sst_joiner_completed= false;
+ pthread_detach (monitor);
+
return ret;
}
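The quoting rule that the estimate_cmd_len() and copy_orig_argv() comments above spell out four times can be collapsed into a single predicate. The function below is a condensed standalone sketch of that rule only (names are illustrative and _WIN32 stands in for the server's __WIN__ check); it is not the server code.

#include <cctype>

// An SST script argument must be wrapped in double quotes if it contains
// whitespace, a single quote, '&', '|', an I/O redirection character or,
// on POSIX systems, the ';' command separator.
static bool needs_double_quotes(const char *arg)
{
  for (char c; (c= *arg++) != 0; )
  {
    if (isspace((unsigned char) c) || c == '\'' || c == '&' || c == '|' ||
        c == '>' || c == '<'
#ifndef _WIN32
        || c == ';'
#endif
       )
      return true;
  }
  return false;
}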
diff --git a/sql/wsrep_thd.cc b/sql/wsrep_thd.cc
index 0f72c132d84..023da27c3c1 100644
--- a/sql/wsrep_thd.cc
+++ b/sql/wsrep_thd.cc
@@ -22,7 +22,6 @@
#include "rpl_rli.h"
#include "log_event.h"
#include "sql_parse.h"
-#include "sql_base.h" // close_thread_tables()
#include "mysqld.h" // start_wsrep_THD();
#include "wsrep_applier.h" // start_wsrep_THD();
#include "mysql/service_wsrep.h"
@@ -126,11 +125,7 @@ bool wsrep_create_appliers(long threads, bool mutex_protected)
return false;
}
- if (!wsrep_cluster_address || wsrep_cluster_address[0]== 0)
- {
- WSREP_DEBUG("wsrep_create_appliers exit due to empty address");
- return false;
- }
+ DBUG_ASSERT(wsrep_cluster_address[0]);
long wsrep_threads=0;
@@ -285,16 +280,14 @@ static void wsrep_rollback_process(THD *rollbacker,
void wsrep_create_rollbacker()
{
- if (wsrep_cluster_address && wsrep_cluster_address[0] != 0)
- {
- Wsrep_thd_args* args(new Wsrep_thd_args(wsrep_rollback_process,
- WSREP_ROLLBACKER_THREAD,
- pthread_self()));
-
- /* create rollbacker */
- if (create_wsrep_THD(args, false))
- WSREP_WARN("Can't create thread to manage wsrep rollback");
- }
+ DBUG_ASSERT(wsrep_cluster_address[0]);
+ Wsrep_thd_args* args(new Wsrep_thd_args(wsrep_rollback_process,
+ WSREP_ROLLBACKER_THREAD,
+ pthread_self()));
+
+ /* create rollbacker */
+ if (create_wsrep_THD(args, false))
+ WSREP_WARN("Can't create thread to manage wsrep rollback");
}
/*
@@ -375,25 +368,6 @@ bool wsrep_bf_abort(const THD* bf_thd, THD* victim_thd)
return ret;
}
-/*
- Get auto increment variables for THD. Use global settings for
- applier threads.
- */
-void wsrep_thd_auto_increment_variables(THD* thd,
- unsigned long long* offset,
- unsigned long long* increment)
-{
- if (wsrep_thd_is_applying(thd) &&
- thd->wsrep_trx().state() != wsrep::transaction::s_replaying)
- {
- *offset= global_system_variables.auto_increment_offset;
- *increment= global_system_variables.auto_increment_increment;
- return;
- }
- *offset= thd->variables.auto_increment_offset;
- *increment= thd->variables.auto_increment_increment;
-}
-
int wsrep_create_threadvars()
{
int ret= 0;
diff --git a/sql/wsrep_trans_observer.h b/sql/wsrep_trans_observer.h
index 05970e8b12f..bb9bd54b02f 100644
--- a/sql/wsrep_trans_observer.h
+++ b/sql/wsrep_trans_observer.h
@@ -407,8 +407,10 @@ static inline void wsrep_after_apply(THD* thd)
static inline void wsrep_open(THD* thd)
{
DBUG_ENTER("wsrep_open");
- if (WSREP(thd))
+ if (WSREP_ON_)
{
+ /* WSREP_ON_ cannot be set unless WSREP_PROVIDER_EXISTS_ is also set */
+ DBUG_ASSERT(WSREP_PROVIDER_EXISTS_);
thd->wsrep_cs().open(wsrep::client_id(thd->thread_id));
thd->wsrep_cs().debug_log_level(wsrep_debug);
if (!thd->wsrep_applier && thd->variables.wsrep_trx_fragment_size)
@@ -431,6 +433,16 @@ static inline void wsrep_close(THD* thd)
DBUG_VOID_RETURN;
}
+static inline void wsrep_cleanup(THD* thd)
+{
+ DBUG_ENTER("wsrep_cleanup");
+ if (thd->wsrep_cs().state() != wsrep::client_state::s_none)
+ {
+ thd->wsrep_cs().cleanup();
+ }
+ DBUG_VOID_RETURN;
+}
+
static inline void
wsrep_wait_rollback_complete_and_acquire_ownership(THD *thd)
{
@@ -442,11 +454,17 @@ wsrep_wait_rollback_complete_and_acquire_ownership(THD *thd)
DBUG_VOID_RETURN;
}
-static inline int wsrep_before_command(THD* thd)
+static inline int wsrep_before_command(THD* thd, bool keep_command_error)
{
return (thd->wsrep_cs().state() != wsrep::client_state::s_none ?
- thd->wsrep_cs().before_command() : 0);
+ thd->wsrep_cs().before_command(keep_command_error) : 0);
+}
+
+static inline int wsrep_before_command(THD* thd)
+{
+ return wsrep_before_command(thd, false);
}
+
/*
Called after each command.
diff --git a/sql/wsrep_var.cc b/sql/wsrep_var.cc
index 5336bc9f508..e4cfd0d89c9 100644
--- a/sql/wsrep_var.cc
+++ b/sql/wsrep_var.cc
@@ -25,6 +25,7 @@
#include <my_dir.h>
#include <cstdio>
#include <cstdlib>
+#include "wsrep_trans_observer.h"
ulong wsrep_reject_queries;
@@ -88,10 +89,11 @@ static bool refresh_provider_options()
}
}
-static void wsrep_set_wsrep_on()
+void wsrep_set_wsrep_on()
{
- WSREP_ON_= global_system_variables.wsrep_on && wsrep_provider &&
- strcmp(wsrep_provider, WSREP_NONE);
+ WSREP_PROVIDER_EXISTS_= wsrep_provider &&
+ strncasecmp(wsrep_provider, WSREP_NONE, FN_REFLEN);
+ WSREP_ON_= global_system_variables.wsrep_on && WSREP_PROVIDER_EXISTS_;
}
/* This is intentionally declared as a weak global symbol, so that
@@ -102,7 +104,8 @@ struct handlerton* innodb_hton_ptr __attribute__((weak));
bool wsrep_on_update (sys_var *self, THD* thd, enum_var_type var_type)
{
- if (var_type == OPT_GLOBAL) {
+ if (var_type == OPT_GLOBAL)
+ {
my_bool saved_wsrep_on= global_system_variables.wsrep_on;
thd->variables.wsrep_on= global_system_variables.wsrep_on;
@@ -110,15 +113,15 @@ bool wsrep_on_update (sys_var *self, THD* thd, enum_var_type var_type)
// If wsrep has not been inited we need to do it now
if (global_system_variables.wsrep_on && wsrep_provider && !wsrep_inited)
{
- char* tmp= strdup(wsrep_provider); // wsrep_init() rewrites provider
- //when fails
-
+ // wsrep_init() rewrites wsrep_provider if it fails
+ char* tmp= strdup(wsrep_provider);
mysql_mutex_unlock(&LOCK_global_system_variables);
if (wsrep_init())
{
my_error(ER_CANT_OPEN_LIBRARY, MYF(0), tmp, my_error, "wsrep_init failed");
//rcode= true;
+ saved_wsrep_on= false;
}
free(tmp);
@@ -130,6 +133,16 @@ bool wsrep_on_update (sys_var *self, THD* thd, enum_var_type var_type)
wsrep_set_wsrep_on();
+ if (var_type == OPT_GLOBAL)
+ {
+ if (thd->variables.wsrep_on &&
+ thd->wsrep_cs().state() == wsrep::client_state::s_none)
+ {
+ wsrep_open(thd);
+ wsrep_before_command(thd);
+ }
+ }
+
return false;
}
@@ -140,12 +153,57 @@ bool wsrep_on_check(sys_var *self, THD* thd, set_var* var)
if (check_has_super(self, thd, var))
return true;
- if (new_wsrep_on && innodb_hton_ptr && innodb_lock_schedule_algorithm != 0) {
- my_message(ER_WRONG_ARGUMENTS, " WSREP (galera) can't be enabled "
- "if innodb_lock_schedule_algorithm=VATS. Please configure"
- " innodb_lock_schedule_algorithm=FCFS and restart.", MYF(0));
+ if (new_wsrep_on)
+ {
+ if (innodb_hton_ptr && innodb_lock_schedule_algorithm != 0)
+ {
+ my_message(ER_WRONG_ARGUMENTS, " WSREP (galera) can't be enabled "
+ "if innodb_lock_schedule_algorithm=VATS. Please configure"
+ " innodb_lock_schedule_algorithm=FCFS and restart.", MYF(0));
+ return true;
+ }
+
+ if (!WSREP_PROVIDER_EXISTS)
+ {
+ my_message(ER_WRONG_ARGUMENTS, "WSREP (galera) can't be enabled "
+ "if the wsrep_provider is unset or set to 'none'", MYF(0));
+ return true;
+ }
+
+ if (var->type == OPT_SESSION &&
+ !global_system_variables.wsrep_on)
+ {
+ my_message(ER_WRONG_ARGUMENTS,
+ "Can't enable @@session.wsrep_on, "
+ "while @@global.wsrep_on is disabled", MYF(0));
+ return true;
+ }
+ }
+
+ if (thd->in_active_multi_stmt_transaction())
+ {
+ my_error(ER_CANT_DO_THIS_DURING_AN_TRANSACTION, MYF(0));
return true;
}
+
+ if (var->type == OPT_GLOBAL)
+ {
+ /*
+ The global value is about to change. Cleanup
+ the transaction state and close the client
+ state. wsrep_on_update() will take care of
+ reopening it should wsrep_on be re-enabled.
+ */
+ if (global_system_variables.wsrep_on && !new_wsrep_on)
+ {
+ wsrep_commit_empty(thd, true);
+ wsrep_after_statement(thd);
+ wsrep_after_command_ignore_result(thd);
+ wsrep_close(thd);
+ wsrep_cleanup(thd);
+ }
+ }
+
return false;
}
@@ -212,8 +270,11 @@ bool wsrep_start_position_verify (const char* start_str)
return true;
char* endptr;
- wsrep_seqno_t const seqno __attribute__((unused)) // to avoid GCC warnings
- (strtoll(&start_str[uuid_len + 1], &endptr, 10));
+ wsrep_seqno_t const seqno(strtoll(&start_str[uuid_len + 1], &endptr, 10));
+
+ // Do not allow seqno < -1
+ if (*endptr == '\0' && seqno < -1)
+ return true;
// Remaining string was seqno.
if (*endptr == '\0') return false;
@@ -230,12 +291,24 @@ bool wsrep_set_local_position(THD* thd, const char* const value,
size_t const uuid_len= wsrep_uuid_scan(value, length, &uuid);
wsrep_seqno_t const seqno= strtoll(value + uuid_len + 1, NULL, 10);
- if (sst) {
- wsrep_sst_received (thd, uuid, seqno, NULL, 0);
- } else {
- local_uuid= uuid;
- local_seqno= seqno;
- }
+ char start_pos_buf[FN_REFLEN];
+ memcpy(start_pos_buf, value, length);
+ start_pos_buf[length]='\0';
+
+ // If both the new and the current position are WSREP_START_POSITION_ZERO,
+ // just set the local values
+ if (!strcmp(start_pos_buf, WSREP_START_POSITION_ZERO) &&
+ !strcmp(wsrep_start_position, WSREP_START_POSITION_ZERO))
+ goto set;
+ else
+ WSREP_INFO("SST setting local position to %s current %s", start_pos_buf, wsrep_start_position);
+
+ if (sst)
+ return (wsrep_sst_received (thd, uuid, seqno, NULL, 0));
+
+set:
+ local_uuid= uuid;
+ local_seqno= seqno;
+
return false;
}
@@ -252,19 +325,34 @@ bool wsrep_start_position_check (sys_var *self, THD* thd, set_var* var)
var->save_result.string_value.length);
start_pos_buf[var->save_result.string_value.length]= 0;
+
+ WSREP_DEBUG("SST wsrep_start_position check for new position %s old %s",
+ start_pos_buf, wsrep_start_position);
+
// Verify the format.
if (wsrep_start_position_verify(start_pos_buf)) return true;
+
+ // Give error if position is updated when wsrep is not enabled or
+ // provider is not loaded.
+ if ((!WSREP_ON || !Wsrep_server_state::instance().is_provider_loaded())
+ && strcmp(start_pos_buf, WSREP_START_POSITION_ZERO))
+ {
+ push_warning(thd, Sql_condition::WARN_LEVEL_WARN,
+ ER_WRONG_VALUE_FOR_VAR,
+ "Cannot set 'wsrep_start_position' because "
+ "wsrep is switched off or provider is not loaded");
+ goto err;
+ }
+
/*
As part of further verification, we try to update the value and catch
- errors (if any).
+ errors (if any), but only when the value has actually been changed.
*/
if (wsrep_set_local_position(thd, var->save_result.string_value.str,
var->save_result.string_value.length,
true))
- {
goto err;
- }
return false;
@@ -286,7 +374,7 @@ bool wsrep_start_position_init (const char* val)
{
if (NULL == val || wsrep_start_position_verify (val))
{
- WSREP_ERROR("Bad initial value for wsrep_start_position: %s",
+ WSREP_ERROR("Bad initial value for wsrep_start_position: %s",
(val ? val : ""));
return true;
}
@@ -400,8 +488,8 @@ bool wsrep_provider_update (sys_var *self, THD* thd, enum_var_type type)
void wsrep_provider_init (const char* value)
{
- WSREP_DEBUG("wsrep_provider_init: %s -> %s",
- (wsrep_provider) ? wsrep_provider : "null",
+ WSREP_DEBUG("wsrep_provider_init: %s -> %s",
+ (wsrep_provider) ? wsrep_provider : "null",
(value) ? value : "null");
if (NULL == value || wsrep_provider_verify (value))
{
@@ -427,20 +515,26 @@ bool wsrep_provider_options_check(sys_var *self, THD* thd, set_var* var)
bool wsrep_provider_options_update(sys_var *self, THD* thd, enum_var_type type)
{
- enum wsrep::provider::status ret=
- Wsrep_server_state::instance().provider().options(wsrep_provider_options);
- if (ret)
+ if (wsrep_provider_options)
{
- WSREP_ERROR("Set options returned %d", ret);
- refresh_provider_options();
- return true;
+ enum wsrep::provider::status ret=
+ Wsrep_server_state::instance().provider().options(wsrep_provider_options);
+ if (ret)
+ {
+ WSREP_ERROR("Set options returned %d", ret);
+ goto err;
+ }
+
+ return refresh_provider_options();
}
- return refresh_provider_options();
+err:
+ refresh_provider_options();
+ return true;
}
void wsrep_provider_options_init(const char* value)
{
- if (wsrep_provider_options && wsrep_provider_options != value)
+ if (wsrep_provider_options && wsrep_provider_options != value)
my_free((void *)wsrep_provider_options);
wsrep_provider_options= (value) ? my_strdup(value, MYF(0)) : NULL;
}
@@ -469,8 +563,21 @@ bool wsrep_reject_queries_update(sys_var *self, THD* thd, enum_var_type type)
bool wsrep_debug_update(sys_var *self, THD* thd, enum_var_type type)
{
+ // Warn if wsrep_debug is set while wsrep is disabled or the provider
+ // is not loaded, because the setting will have no effect
+ if ((!WSREP_ON || !Wsrep_server_state::instance().is_provider_loaded())
+ && wsrep_debug)
+ {
+ push_warning(thd, Sql_condition::WARN_LEVEL_WARN,
+ ER_WRONG_VALUE_FOR_VAR,
+ "Setting 'wsrep_debug' has no effect because "
+ "wsrep is switched off");
+ wsrep_debug= 0;
+ }
+ else
Wsrep_server_state::instance().debug_log_level(wsrep_debug);
- return false;
+
+ return false;
}
static int wsrep_cluster_address_verify (const char* cluster_address_str)
@@ -508,41 +615,42 @@ bool wsrep_cluster_address_update (sys_var *self, THD* thd, enum_var_type type)
return false;
}
- /* stop replication is heavy operation, and includes closing all client
+ /* Stopping replication is a heavy operation that includes closing all client
connections. Closing clients may need to get LOCK_global_system_variables
at least in MariaDB.
-
- Note: releasing LOCK_global_system_variables may cause race condition, if
- there can be several concurrent clients changing wsrep_provider
*/
+ char *tmp= my_strdup(wsrep_cluster_address, MYF(MY_WME));
WSREP_DEBUG("wsrep_cluster_address_update: %s", wsrep_cluster_address);
mysql_mutex_unlock(&LOCK_global_system_variables);
+
+ mysql_mutex_lock(&LOCK_wsrep_cluster_config);
wsrep_stop_replication(thd);
- if (wsrep_start_replication())
+ if (*tmp && wsrep_start_replication(tmp))
{
wsrep_create_rollbacker();
WSREP_DEBUG("Cluster address update creating %ld applier threads running %lu",
wsrep_slave_threads, wsrep_running_applier_threads);
wsrep_create_appliers(wsrep_slave_threads);
}
- /* locking order to be enforced is:
- 1. LOCK_global_system_variables
- 2. LOCK_wsrep_cluster_config
- => have to juggle mutexes to comply with this
- */
-
mysql_mutex_unlock(&LOCK_wsrep_cluster_config);
+
mysql_mutex_lock(&LOCK_global_system_variables);
- mysql_mutex_lock(&LOCK_wsrep_cluster_config);
+ if (strcmp(tmp, wsrep_cluster_address))
+ {
+ my_free((void*)wsrep_cluster_address);
+ wsrep_cluster_address= tmp;
+ }
+ else
+ my_free(tmp);
return false;
}
void wsrep_cluster_address_init (const char* value)
{
- WSREP_DEBUG("wsrep_cluster_address_init: %s -> %s",
- (wsrep_cluster_address) ? wsrep_cluster_address : "null",
+ WSREP_DEBUG("wsrep_cluster_address_init: %s -> %s",
+ (wsrep_cluster_address) ? wsrep_cluster_address : "null",
(value) ? value : "null");
my_free((void*) wsrep_cluster_address);
@@ -631,7 +739,12 @@ static void wsrep_slave_count_change_update ()
bool wsrep_slave_threads_update (sys_var *self, THD* thd, enum_var_type type)
{
+ if (!wsrep_cluster_address_exists())
+ return false;
+
+ mysql_mutex_unlock(&LOCK_global_system_variables);
mysql_mutex_lock(&LOCK_wsrep_slave_threads);
+ mysql_mutex_lock(&LOCK_global_system_variables);
bool res= false;
wsrep_slave_count_change_update();
@@ -742,6 +855,18 @@ bool wsrep_trx_fragment_size_update(sys_var* self, THD *thd, enum_var_type)
{
WSREP_DEBUG("wsrep_trx_fragment_size_update: %llu",
thd->variables.wsrep_trx_fragment_size);
+
+ // Give error if wsrep_trx_fragment_size is set and wsrep is disabled or
+ // provider is not loaded
+ if (!WSREP_ON || !Wsrep_server_state::instance().is_provider_loaded())
+ {
+ push_warning (thd, Sql_condition::WARN_LEVEL_WARN,
+ ER_WRONG_VALUE_FOR_VAR,
+ "Cannot set 'wsrep_trx_fragment_size' because "
+ "wsrep is switched off");
+ return true;
+ }
+
if (thd->variables.wsrep_trx_fragment_size)
{
return thd->wsrep_cs().enable_streaming(
@@ -759,6 +884,18 @@ bool wsrep_trx_fragment_unit_update(sys_var* self, THD *thd, enum_var_type)
{
WSREP_DEBUG("wsrep_trx_fragment_unit_update: %lu",
thd->variables.wsrep_trx_fragment_unit);
+
+ // Give error if wsrep_trx_fragment_unit is set and wsrep is disabled or
+ // provider is not loaded
+ if (!WSREP_ON || !Wsrep_server_state::instance().is_provider_loaded())
+ {
+ push_warning (thd, Sql_condition::WARN_LEVEL_WARN,
+ ER_WRONG_VALUE_FOR_VAR,
+ "Cannot set 'wsrep_trx_fragment_unit' because "
+ "wsrep is switched off");
+ return true;
+ }
+
if (thd->variables.wsrep_trx_fragment_size)
{
return thd->wsrep_cs().enable_streaming(
diff --git a/sql/wsrep_var.h b/sql/wsrep_var.h
index 481df02f2d5..b1b2932cdfe 100644
--- a/sql/wsrep_var.h
+++ b/sql/wsrep_var.h
@@ -35,6 +35,7 @@ class set_var;
class THD;
int wsrep_init_vars();
+void wsrep_set_wsrep_on();
#define CHECK_ARGS (sys_var *self, THD* thd, set_var *var)
#define UPDATE_ARGS (sys_var *self, THD* thd, enum_var_type type)
diff --git a/storage/archive/ha_archive.cc b/storage/archive/ha_archive.cc
index fa49b081ad1..9526306a601 100644
--- a/storage/archive/ha_archive.cc
+++ b/storage/archive/ha_archive.cc
@@ -243,6 +243,20 @@ Archive_share::Archive_share()
}
+Archive_share::~Archive_share()
+{
+ DBUG_PRINT("ha_archive", ("~Archive_share: %p", this));
+ if (archive_write_open)
+ {
+ mysql_mutex_lock(&mutex);
+ (void) close_archive_writer(); // Will reset archive_write_open
+ mysql_mutex_unlock(&mutex);
+ }
+ thr_lock_delete(&lock);
+ mysql_mutex_destroy(&mutex);
+}
+
+
ha_archive::ha_archive(handlerton *hton, TABLE_SHARE *table_arg)
:handler(hton, table_arg), delayed_insert(0), bulk_insert(0)
{
@@ -676,7 +690,6 @@ int ha_archive::close(void)
if (azclose(&archive))
rc= 1;
}
-
DBUG_RETURN(rc);
}
@@ -1547,7 +1560,7 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt)
share->rows_recorded= 0;
stats.auto_increment_value= 1;
share->archive_write.auto_increment= 0;
- my_bitmap_map *org_bitmap= tmp_use_all_columns(table, table->read_set);
+ MY_BITMAP *org_bitmap= tmp_use_all_columns(table, &table->read_set);
while (!(rc= get_row(&archive, table->record[0])))
{
@@ -1568,7 +1581,7 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt)
}
}
- tmp_restore_column_map(table->read_set, org_bitmap);
+ tmp_restore_column_map(&table->read_set, org_bitmap);
share->rows_recorded= (ha_rows)writer.rows;
}
diff --git a/storage/archive/ha_archive.h b/storage/archive/ha_archive.h
index b9fcf10f96f..35291e469cd 100644
--- a/storage/archive/ha_archive.h
+++ b/storage/archive/ha_archive.h
@@ -46,19 +46,7 @@ public:
bool dirty; /* Flag for if a flush should occur */
bool crashed; /* Meta file is crashed */
Archive_share();
- ~Archive_share()
- {
- DBUG_PRINT("ha_archive", ("~Archive_share: %p",
- this));
- if (archive_write_open)
- {
- mysql_mutex_lock(&mutex);
- (void) close_archive_writer();
- mysql_mutex_unlock(&mutex);
- }
- thr_lock_delete(&lock);
- mysql_mutex_destroy(&mutex);
- }
+ virtual ~Archive_share();
int init_archive_writer();
void close_archive_writer();
int write_v1_metafile();
diff --git a/storage/cassandra/ha_cassandra.cc b/storage/cassandra/ha_cassandra.cc
index f081dca71c3..1d2331c1a5e 100644
--- a/storage/cassandra/ha_cassandra.cc
+++ b/storage/cassandra/ha_cassandra.cc
@@ -1641,18 +1641,18 @@ int ha_cassandra::index_read_map(uchar *buf, const uchar *key,
char *cass_key;
int cass_key_len;
- my_bitmap_map *old_map;
+ MY_BITMAP *old_map;
- old_map= dbug_tmp_use_all_columns(table, table->read_set);
+ old_map= dbug_tmp_use_all_columns(table, &table->read_set);
if (rowkey_converter->mariadb_to_cassandra(&cass_key, &cass_key_len))
{
/* We get here when making lookups like uuid_column='not-an-uuid' */
- dbug_tmp_restore_column_map(table->read_set, old_map);
+ dbug_tmp_restore_column_map(&table->read_set, old_map);
DBUG_RETURN(HA_ERR_KEY_NOT_FOUND);
}
- dbug_tmp_restore_column_map(table->read_set, old_map);
+ dbug_tmp_restore_column_map(&table->read_set, old_map);
bool found;
if (se->get_slice(cass_key, cass_key_len, &found))
@@ -1726,8 +1726,8 @@ int ha_cassandra::read_cassandra_columns(bool unpack_pk)
cassandra_to_mariadb() calls will use field->store(...) methods, which
require that the column is in the table->write_set
*/
- my_bitmap_map *old_map;
- old_map= dbug_tmp_use_all_columns(table, table->write_set);
+ MY_BITMAP *old_map;
+ old_map= dbug_tmp_use_all_columns(table, &table->write_set);
/* Start with all fields being NULL */
for (field= table->field + 1; *field; field++)
@@ -1848,7 +1848,7 @@ int ha_cassandra::read_cassandra_columns(bool unpack_pk)
}
err:
- dbug_tmp_restore_column_map(table->write_set, old_map);
+ dbug_tmp_restore_column_map(&table->write_set, old_map);
return res;
}
@@ -1933,7 +1933,7 @@ void ha_cassandra::free_dynamic_row(DYNAMIC_COLUMN_VALUE **vals,
int ha_cassandra::write_row(const uchar *buf)
{
- my_bitmap_map *old_map;
+ MY_BITMAP *old_map;
int ires;
DBUG_ENTER("ha_cassandra::write_row");
@@ -1943,7 +1943,7 @@ int ha_cassandra::write_row(const uchar *buf)
if (!doing_insert_batch)
se->clear_insert_buffer();
- old_map= dbug_tmp_use_all_columns(table, table->read_set);
+ old_map= dbug_tmp_use_all_columns(table, &table->read_set);
insert_lineno++;
@@ -1954,7 +1954,7 @@ int ha_cassandra::write_row(const uchar *buf)
{
my_error(ER_WARN_DATA_OUT_OF_RANGE, MYF(0),
rowkey_converter->field->field_name.str, insert_lineno);
- dbug_tmp_restore_column_map(table->read_set, old_map);
+ dbug_tmp_restore_column_map(&table->read_set, old_map);
DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
}
se->start_row_insert(cass_key, cass_key_len);
@@ -1977,7 +1977,7 @@ int ha_cassandra::write_row(const uchar *buf)
free_dynamic_row(&vals, &names);
if (rc)
{
- dbug_tmp_restore_column_map(table->read_set, old_map);
+ dbug_tmp_restore_column_map(&table->read_set, old_map);
DBUG_RETURN(rc);
}
}
@@ -1988,7 +1988,7 @@ int ha_cassandra::write_row(const uchar *buf)
{
my_error(ER_WARN_DATA_OUT_OF_RANGE, MYF(0),
field_converters[i]->field->field_name.str, insert_lineno);
- dbug_tmp_restore_column_map(table->read_set, old_map);
+ dbug_tmp_restore_column_map(&table->read_set, old_map);
DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
}
se->add_insert_column(field_converters[i]->field->field_name.str, 0,
@@ -1996,7 +1996,7 @@ int ha_cassandra::write_row(const uchar *buf)
}
}
- dbug_tmp_restore_column_map(table->read_set, old_map);
+ dbug_tmp_restore_column_map(&table->read_set, old_map);
bool res;
@@ -2263,8 +2263,8 @@ bool ha_cassandra::mrr_start_read()
{
uint key_len;
- my_bitmap_map *old_map;
- old_map= dbug_tmp_use_all_columns(table, table->read_set);
+ MY_BITMAP *old_map;
+ old_map= dbug_tmp_use_all_columns(table, &table->read_set);
se->new_lookup_keys();
@@ -2288,7 +2288,7 @@ bool ha_cassandra::mrr_start_read()
break;
}
- dbug_tmp_restore_column_map(table->read_set, old_map);
+ dbug_tmp_restore_column_map(&table->read_set, old_map);
return se->multiget_slice();
}
@@ -2366,7 +2366,7 @@ int ha_cassandra::update_row(const uchar *old_data, const uchar *new_data)
LEX_STRING *oldnames, *names;
uint oldcount, count;
String oldvalcol, valcol;
- my_bitmap_map *old_map;
+ MY_BITMAP *old_map;
int res;
DBUG_ENTER("ha_cassandra::update_row");
/* Currently, it is guaranteed that new_data == table->record[0] */
@@ -2374,7 +2374,7 @@ int ha_cassandra::update_row(const uchar *old_data, const uchar *new_data)
/* For now, just rewrite the full record */
se->clear_insert_buffer();
- old_map= dbug_tmp_use_all_columns(table, table->read_set);
+ old_map= dbug_tmp_use_all_columns(table, &table->read_set);
char *old_key;
int old_key_len;
@@ -2387,7 +2387,7 @@ int ha_cassandra::update_row(const uchar *old_data, const uchar *new_data)
{
my_error(ER_WARN_DATA_OUT_OF_RANGE, MYF(0),
rowkey_converter->field->field_name.str, insert_lineno);
- dbug_tmp_restore_column_map(table->read_set, old_map);
+ dbug_tmp_restore_column_map(&table->read_set, old_map);
DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
}
@@ -2450,7 +2450,7 @@ int ha_cassandra::update_row(const uchar *old_data, const uchar *new_data)
{
my_error(ER_WARN_DATA_OUT_OF_RANGE, MYF(0),
field_converters[i]->field->field_name.str, insert_lineno);
- dbug_tmp_restore_column_map(table->read_set, old_map);
+ dbug_tmp_restore_column_map(&table->read_set, old_map);
DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
}
se->add_insert_column(field_converters[i]->field->field_name.str, 0,
@@ -2477,7 +2477,7 @@ int ha_cassandra::update_row(const uchar *old_data, const uchar *new_data)
}
}
- dbug_tmp_restore_column_map(table->read_set, old_map);
+ dbug_tmp_restore_column_map(&table->read_set, old_map);
res= se->do_insert();
diff --git a/storage/connect/CMakeLists.txt b/storage/connect/CMakeLists.txt
index 0af3a02a09d..b69d1a04f54 100644
--- a/storage/connect/CMakeLists.txt
+++ b/storage/connect/CMakeLists.txt
@@ -82,6 +82,19 @@ ENDIF(UNIX)
#
+# BSON: the new handling of JSON data included temporarily for testing
+#
+
+OPTION(CONNECT_WITH_BSON "Compile CONNECT storage engine with BSON support" ON)
+
+IF(CONNECT_WITH_BSON)
+ SET(CONNECT_SOURCES ${CONNECT_SOURCES}
+ bson.cpp bsonudf.cpp tabbson.cpp bson.h bsonudf.h tabbson.h)
+ add_definitions(-DBSON_SUPPORT)
+ENDIF(CONNECT_WITH_BSON)
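+# For example, BSON support can be left out at configure time with
+# (generic invocation, <source-dir> being your checkout):
+#   cmake -DCONNECT_WITH_BSON=OFF <source-dir>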
+
+
+#
# VCT: the VEC format might be not supported in future versions
#
@@ -318,29 +331,29 @@ ENDIF(CONNECT_WITH_MONGO)
OPTION(CONNECT_WITH_REST "Compile CONNECT storage engine with REST support" ON)
IF(CONNECT_WITH_REST)
- MESSAGE_ONCE(CONNECT_WITH_REST "REST support is ON")
+# MESSAGE(STATUS "=====> REST support is ON")
SET(CONNECT_SOURCES ${CONNECT_SOURCES} tabrest.cpp tabrest.h)
add_definitions(-DREST_SUPPORT)
- FIND_PACKAGE(cpprestsdk QUIET)
- IF (cpprestsdk_FOUND)
- IF(UNIX)
-# INCLUDE_DIRECTORIES(${CPPRESTSDK_INCLUDE_DIR})
-# If needed edit next line to set the path to libcpprest.so
- SET(REST_LIBRARY -lcpprest)
- MESSAGE (STATUS ${REST_LIBRARY})
- ELSE(NOT UNIX)
-# Next line sets debug compile mode matching cpprest_2_10d.dll
-# when it was binary installed (can be change later in Visual Studio)
-# Comment it out if not needed depending on your cpprestsdk installation.
- SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /MDd")
- ENDIF(UNIX)
-# IF(REST_LIBRARY) why this? how about Windows
- SET(CONNECT_SOURCES ${CONNECT_SOURCES} restget.cpp)
- add_definitions(-DREST_SOURCE)
-# ENDIF()
- ELSE(NOT cpprestsdk_FOUND)
-# MESSAGE(STATUS "=====> cpprestsdk package not found")
- ENDIF (cpprestsdk_FOUND)
+# FIND_PACKAGE(cpprestsdk QUIET)
+# IF (cpprestsdk_FOUND)
+# IF(UNIX)
+## INCLUDE_DIRECTORIES(${CPPRESTSDK_INCLUDE_DIR})
+## If needed edit next line to set the path to libcpprest.so
+# SET(REST_LIBRARY -lcpprest)
+# MESSAGE (STATUS ${REST_LIBRARY})
+# ELSE(NOT UNIX)
+## Next line sets debug compile mode matching cpprest_2_10d.dll
+## when it was binary installed (can be change later in Visual Studio)
+## Comment it out if not needed depending on your cpprestsdk installation.
+# SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /MDd")
+# ENDIF(UNIX)
+## IF(REST_LIBRARY) why this? how about Windows
+# SET(CONNECT_SOURCES ${CONNECT_SOURCES} restget.cpp)
+# add_definitions(-DREST_SOURCE)
+## ENDIF()
+##ELSE(NOT cpprestsdk_FOUND)
+## MESSAGE(STATUS "=====> cpprestsdk package not found")
+# ENDIF (cpprestsdk_FOUND)
ENDIF(CONNECT_WITH_REST)
#
diff --git a/storage/connect/block.h b/storage/connect/block.h
index 2ca9586ee3f..c10fc4761ac 100644
--- a/storage/connect/block.h
+++ b/storage/connect/block.h
@@ -1,25 +1,25 @@
/**************** Block H Declares Source Code File (.H) ***************/
-/* Name: BLOCK.H Version 2.0 */
+/* Name: BLOCK.H Version 2.1 */
/* */
-/* (C) Copyright to the author Olivier BERTRAND 1998 */
+/* (C) Copyright to the author Olivier BERTRAND 1998 - 2020 */
/* */
/* This file contains the BLOCK pure virtual class definition. */
/*---------------------------------------------------------------------*/
/* Note: one of the main purpose of this base class is to take care */
-/* of the very specific way Plug handles memory allocation. */
+/* of the very specific way Connect handles memory allocation. */
/* Instead of allocating small chunks of storage via new or malloc */
-/* Plug works in its private memory pool in which it does the sub- */
+/* Connect works in its private memory pool in which it does the sub- */
/* allocation using the function PlugSubAlloc. These are never freed */
/* separately but when a transaction is terminated, the entire pool */
/* is set to empty, resulting in a very fast and efficient allocate */
/* process, no garbage collection problem, and an automatic recovery */
-/* procedure (via LongJump) when the memory is exhausted. */
+/* procedure (via throw) when the memory is exhausted. */
/* For this to work new must be given two parameters, first the */
/* global pointer of the Plug application, and an optional pointer to */
/* the memory pool to use, defaulting to NULL meaning using the Plug */
/* standard default memory pool, example: */
-/* tabp = new(g) XTAB("EMPLOYEE"); */
-/* allocates a XTAB class object in the standard Plug memory pool. */
+/* tabp = new(g) XTAB("EMPLOYEE"); */
+/* allocates a XTAB class object in the standard Plug memory pool. */
/***********************************************************************/
#if !defined(BLOCK_DEFINED)
#define BLOCK_DEFINED
@@ -37,21 +37,25 @@ typedef class BLOCK *PBLOCK;
class DllExport BLOCK {
public:
- void * operator new(size_t size, PGLOBAL g, void *p = NULL) {
- xtrc(256, "New BLOCK: size=%d g=%p p=%p\n", size, g, p);
- return (PlugSubAlloc(g, p, size));
- } // end of new
+ void *operator new(size_t size, PGLOBAL g, void *mp = NULL) {
+ xtrc(256, "New BLOCK: size=%d g=%p p=%p\n", size, g, mp);
+ return PlugSubAlloc(g, mp, size);
+ } // end of new
- virtual void Printf(PGLOBAL, FILE *, uint) {} // Produce file desc
+ void* operator new(size_t size, long long mp) {
+ xtrc(256, "Realloc at: mp=%lld\n", mp);
+ return (void*)mp;
+ } // end of new
+
+ virtual void Printf(PGLOBAL, FILE *, uint) {} // Produce file desc
virtual void Prints(PGLOBAL, char *, uint) {} // Produce string desc
-#if !defined(__BORLANDC__)
- // Avoid warning C4291 by defining a matching dummy delete operator
- void operator delete(void *, PGLOBAL, void *) {}
- void operator delete(void *, size_t) {}
-#endif
- virtual ~BLOCK() {}
+ // Avoid gcc errors by defining matching dummy delete operators
+ void operator delete(void*, PGLOBAL, void *) {}
+ void operator delete(void*, long long) {}
+ void operator delete(void*) {}
- }; // end of class BLOCK
+ virtual ~BLOCK() {}
+}; // end of class BLOCK
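+
+// Illustrative sketch only (hypothetical FOO class, g an initialized PGLOBAL):
+// every BLOCK-derived object is carved out of the Connect memory pool by the
+// operator new above and is never deleted individually; the whole pool is
+// reset in one shot when the transaction ends.
+//
+//   class FOO : public BLOCK { public: int N; };
+//   FOO *fp = new(g) FOO;      // sub-allocated via PlugSubAlloc(g, NULL, size)
+//   fp->N = 1;                 // used like any object, but never "delete fp"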
#endif // !BLOCK_DEFINED
diff --git a/storage/connect/bson.cpp b/storage/connect/bson.cpp
new file mode 100644
index 00000000000..3c33551cb68
--- /dev/null
+++ b/storage/connect/bson.cpp
@@ -0,0 +1,1788 @@
+/**************** bson C++ Program Source Code File (.CPP) ***************/
+/* Name: bson.cpp Version 1.0 */
+/* */
+/* (C) Copyright to the author Olivier BERTRAND 2020 */
+/* */
+/* This file contains the functions of the BJSON classes.             */
+/***********************************************************************/
+
+/***********************************************************************/
+/* Include relevant sections of the MariaDB header file. */
+/***********************************************************************/
+#include <my_global.h>
+
+/***********************************************************************/
+/* Include application header files: */
+/* global.h is header containing all global declarations. */
+/* plgdbsem.h is header containing the DB application declarations. */
+/* bson.h is header containing the BSON classes declarations. */
+/***********************************************************************/
+#include "global.h"
+#include "plgdbsem.h"
+#include "bson.h"
+
+/***********************************************************************/
+/* Check macro. */
+/***********************************************************************/
+#if defined(_DEBUG)
+#define CheckType(X,Y) if (!X || X ->Type != Y) throw MSG(VALTYPE_NOMATCH);
+#else
+#define CheckType(X,Y)
+#endif
+
+#if defined(__WIN__)
+#define EL "\r\n"
+#else
+#define EL "\n"
+#undef SE_CATCH // Does not work for Linux
+#endif
+
+int GetJsonDefPrec(void);
+
+#if defined(SE_CATCH)
+/**************************************************************************/
+/* This is the support of catching C interrupts to prevent crashes. */
+/**************************************************************************/
+#include <eh.h>
+
+class SE_Exception {
+public:
+ SE_Exception(unsigned int n, PEXCEPTION_RECORD p) : nSE(n), eRec(p) {}
+ ~SE_Exception() {}
+
+ unsigned int nSE;
+ PEXCEPTION_RECORD eRec;
+}; // end of class SE_Exception
+
+void trans_func(unsigned int u, _EXCEPTION_POINTERS* pExp) {
+ throw SE_Exception(u, pExp->ExceptionRecord);
+} // end of trans_func
+
+char* GetExceptionDesc(PGLOBAL g, unsigned int e);
+#endif // SE_CATCH
+
+/* --------------------------- Class BDOC ---------------------------- */
+
+/***********************************************************************/
+/* BDOC constructor. */
+/***********************************************************************/
+BDOC::BDOC(PGLOBAL G) : BJSON(G, NULL)
+{
+ jp = NULL;
+ s = NULL;
+ len = 0;
+ pretty = 3;
+ pty[0] = pty[1] = pty[2] = true;
+ comma = false;
+} // end of BDOC constructor
+
+/***********************************************************************/
+/* Parse a json string. */
+/* Note: when pretty is not known, the caller set pretty to 3. */
+/***********************************************************************/
+PBVAL BDOC::ParseJson(PGLOBAL g, char* js, size_t lng)
+{
+ size_t i;
+  bool b = false;
+ PBVAL bvp = NULL;
+
+ s = js;
+ len = lng;
+ xtrc(1, "BDOC::ParseJson: s=%.10s len=%zd\n", s, len);
+
+ if (!s || !len) {
+ strcpy(g->Message, "Void JSON object");
+ return NULL;
+ } // endif s
+
+ // Trying to guess the pretty format
+ if (s[0] == '[' && (s[1] == '\n' || (s[1] == '\r' && s[2] == '\n')))
+ pty[0] = false;
+
+ try {
+ bvp = NewVal();
+ bvp->Type = TYPE_UNKNOWN;
+
+ for (i = 0; i < len; i++)
+ switch (s[i]) {
+ case '[':
+ if (bvp->Type != TYPE_UNKNOWN)
+ bvp->To_Val = ParseAsArray(i);
+ else
+ bvp->To_Val = ParseArray(++i);
+
+ bvp->Type = TYPE_JAR;
+ break;
+ case '{':
+ if (bvp->Type != TYPE_UNKNOWN) {
+ bvp->To_Val = ParseAsArray(i);
+ bvp->Type = TYPE_JAR;
+ } else {
+ bvp->To_Val = ParseObject(++i);
+ bvp->Type = TYPE_JOB;
+ } // endif Type
+
+ break;
+ case ' ':
+ case '\t':
+ case '\n':
+ case '\r':
+ break;
+ case ',':
+ if (bvp->Type != TYPE_UNKNOWN && (pretty == 1 || pretty == 3)) {
+ comma = true;
+ pty[0] = pty[2] = false;
+ break;
+ } // endif pretty
+
+ sprintf(g->Message, "Unexpected ',' (pretty=%d)", pretty);
+ throw 3;
+ case '(':
+ b = true;
+ break;
+ case ')':
+ if (b) {
+ b = false;
+ break;
+ } // endif b
+
+ default:
+ if (bvp->Type != TYPE_UNKNOWN) {
+ bvp->To_Val = ParseAsArray(i);
+ bvp->Type = TYPE_JAR;
+ } else if ((bvp->To_Val = MOF(ParseValue(i, NewVal()))))
+ bvp->Type = TYPE_JVAL;
+ else
+ throw 4;
+
+ break;
+ }; // endswitch s[i]
+
+ if (bvp->Type == TYPE_UNKNOWN)
+ sprintf(g->Message, "Invalid Json string '%.*s'", MY_MIN((int)len, 50), s);
+ else if (pretty == 3) {
+ for (i = 0; i < 3; i++)
+ if (pty[i]) {
+ pretty = i;
+ break;
+ } // endif pty
+
+  } // endif pretty
+
+ } catch (int n) {
+ if (trace(1))
+ htrc("Exception %d: %s\n", n, G->Message);
+ GetMsg(g);
+ bvp = NULL;
+ } catch (const char* msg) {
+ strcpy(g->Message, msg);
+ bvp = NULL;
+ } // end catch
+
+ return bvp;
+} // end of ParseJson
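+
+// Minimal usage sketch (illustrative only; assumes g is a PGLOBAL whose Sarea
+// work pool was already allocated large enough for the parsed tree):
+//
+//   char  js[] = "{\"a\":1,\"b\":[true,null]}";
+//   BDOC  doc(g);
+//   PBVAL top = doc.ParseJson(g, js, strlen(js));
+//
+//   if (!top)
+//     htrc("parse error: %s\n", g->Message);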
+
+/***********************************************************************/
+/* Parse several items as being in an array. */
+/***********************************************************************/
+OFFSET BDOC::ParseAsArray(size_t& i) {
+ if (pty[0] && (!pretty || pretty > 2)) {
+ OFFSET jsp;
+
+ if ((jsp = ParseArray((i = 0))) && pretty == 3)
+ pretty = (pty[0]) ? 0 : 3;
+
+ return jsp;
+ } else
+ strcpy(G->Message, "More than one item in file");
+
+ return 0;
+} // end of ParseAsArray
+
+/***********************************************************************/
+/* Parse a JSON Array. */
+/***********************************************************************/
+OFFSET BDOC::ParseArray(size_t& i)
+{
+ int level = 0;
+ bool b = (!i);
+ PBVAL vlp, firstvlp, lastvlp;
+
+ vlp = firstvlp = lastvlp = NULL;
+
+ for (; i < len; i++)
+ switch (s[i]) {
+ case ',':
+ if (level < 2) {
+ sprintf(G->Message, "Unexpected ',' near %.*s", (int) ARGS);
+ throw 1;
+ } else
+ level = 1;
+
+ break;
+ case ']':
+ if (level == 1) {
+ sprintf(G->Message, "Unexpected ',]' near %.*s", (int) ARGS);
+ throw 1;
+ } // endif level
+
+ return MOF(firstvlp);
+ case '\n':
+ if (!b)
+ pty[0] = pty[1] = false;
+ case '\r':
+ case ' ':
+ case '\t':
+ break;
+ default:
+ if (level == 2) {
+ sprintf(G->Message, "Unexpected value near %.*s", (int) ARGS);
+ throw 1;
+ } else if (lastvlp) {
+ vlp = ParseValue(i, NewVal());
+ lastvlp->Next = MOF(vlp);
+ lastvlp = vlp;
+ } else
+ firstvlp = lastvlp = ParseValue(i, NewVal());
+
+ level = (b) ? 1 : 2;
+ break;
+ }; // endswitch s[i]
+
+ if (b) {
+ // Case of Pretty == 0
+ return MOF(firstvlp);
+ } // endif b
+
+ throw ("Unexpected EOF in array");
+} // end of ParseArray
+
+/***********************************************************************/
+/* Parse a JSON Object. */
+/***********************************************************************/
+OFFSET BDOC::ParseObject(size_t& i)
+{
+ OFFSET key;
+ int level = 0;
+ PBPR bpp, firstbpp, lastbpp;
+
+ bpp = firstbpp = lastbpp = NULL;
+
+ for (; i < len; i++)
+ switch (s[i]) {
+ case '"':
+ if (level < 2) {
+ key = ParseString(++i);
+ bpp = NewPair(key);
+
+ if (lastbpp) {
+ lastbpp->Vlp.Next = MOF(bpp);
+ lastbpp = bpp;
+ } else
+ firstbpp = lastbpp = bpp;
+
+ level = 2;
+ } else {
+ sprintf(G->Message, "misplaced string near %.*s", (int) ARGS);
+ throw 2;
+ } // endif level
+
+ break;
+ case ':':
+ if (level == 2) {
+ ParseValue(++i, GetVlp(lastbpp));
+ level = 3;
+ } else {
+ sprintf(G->Message, "Unexpected ':' near %.*s", (int) ARGS);
+ throw 2;
+ } // endif level
+
+ break;
+ case ',':
+ if (level < 3) {
+ sprintf(G->Message, "Unexpected ',' near %.*s", (int) ARGS);
+ throw 2;
+ } else
+ level = 1;
+
+ break;
+ case '}':
+ if (!(level == 0 || level == 3)) {
+ sprintf(G->Message, "Unexpected '}' near %.*s", (int) ARGS);
+ throw 2;
+ } // endif level
+
+ return MOF(firstbpp);
+ case '\n':
+ pty[0] = pty[1] = false;
+ case '\r':
+ case ' ':
+ case '\t':
+ break;
+ default:
+ sprintf(G->Message, "Unexpected character '%c' near %.*s",
+ s[i], (int) ARGS);
+ throw 2;
+ }; // endswitch s[i]
+
+ strcpy(G->Message, "Unexpected EOF in Object");
+ throw 2;
+} // end of ParseObject
+
+/***********************************************************************/
+/* Parse a JSON Value. */
+/***********************************************************************/
+PBVAL BDOC::ParseValue(size_t& i, PBVAL bvp)
+{
+ for (; i < len; i++)
+ switch (s[i]) {
+ case '\n':
+ pty[0] = pty[1] = false;
+ case '\r':
+ case ' ':
+ case '\t':
+ break;
+ default:
+ goto suite;
+ } // endswitch
+
+suite:
+ switch (s[i]) {
+ case '[':
+ bvp->To_Val = ParseArray(++i);
+ bvp->Type = TYPE_JAR;
+ break;
+ case '{':
+ bvp->To_Val = ParseObject(++i);
+ bvp->Type = TYPE_JOB;
+ break;
+ case '"':
+ bvp->To_Val = ParseString(++i);
+ bvp->Type = TYPE_STRG;
+ break;
+ case 't':
+ if (!strncmp(s + i, "true", 4)) {
+ bvp->B = true;
+ bvp->Type = TYPE_BOOL;
+ i += 3;
+ } else
+ goto err;
+
+ break;
+ case 'f':
+ if (!strncmp(s + i, "false", 5)) {
+ bvp->B = false;
+ bvp->Type = TYPE_BOOL;
+ i += 4;
+ } else
+ goto err;
+
+ break;
+ case 'n':
+ if (!strncmp(s + i, "null", 4)) {
+ bvp->Type = TYPE_NULL;
+ i += 3;
+ } else
+ goto err;
+
+ break;
+ case '-':
+ default:
+ if (s[i] == '-' || isdigit(s[i]))
+ ParseNumeric(i, bvp);
+ else
+ goto err;
+
+ }; // endswitch s[i]
+
+ return bvp;
+
+err:
+ sprintf(G->Message, "Unexpected character '%c' near %.*s", s[i], (int) ARGS);
+ throw 3;
+} // end of ParseValue
+
+/***********************************************************************/
+/* Unescape and parse a JSON string. */
+/***********************************************************************/
+OFFSET BDOC::ParseString(size_t& i)
+{
+ uchar* p;
+ int n = 0;
+
+ // Be sure of memory availability
+ if (((size_t)len + 1 - i) > ((PPOOLHEADER)G->Sarea)->FreeBlk)
+ throw("ParseString: Out of memory");
+
+ // The size to allocate is not known yet
+ p = (uchar*)BsonSubAlloc(0);
+
+ for (; i < len; i++)
+ switch (s[i]) {
+ case '"':
+ p[n++] = 0;
+ BsonSubAlloc(n);
+ return MOF(p);
+ case '\\':
+ if (++i < len) {
+ if (s[i] == 'u') {
+ if (len - i > 5) {
+ // if (charset == utf8) {
+ char xs[5];
+ uint hex;
+
+ xs[0] = s[++i];
+ xs[1] = s[++i];
+ xs[2] = s[++i];
+ xs[3] = s[++i];
+ xs[4] = 0;
+ hex = strtoul(xs, NULL, 16);
+
+ if (hex < 0x80) {
+ p[n] = (uchar)hex;
+ } else if (hex < 0x800) {
+ p[n++] = (uchar)(0xC0 | (hex >> 6));
+ p[n] = (uchar)(0x80 | (hex & 0x3F));
+ } else if (hex < 0x10000) {
+ p[n++] = (uchar)(0xE0 | (hex >> 12));
+ p[n++] = (uchar)(0x80 | ((hex >> 6) & 0x3f));
+ p[n] = (uchar)(0x80 | (hex & 0x3f));
+ } else
+ p[n] = '?';
+
+#if 0
+ } else {
+ char xs[3];
+ UINT hex;
+
+ i += 2;
+ xs[0] = s[++i];
+ xs[1] = s[++i];
+ xs[2] = 0;
+ hex = strtoul(xs, NULL, 16);
+ p[n] = (char)hex;
+ } // endif charset
+#endif // 0
+ } else
+ goto err;
+
+ } else switch (s[i]) {
+ case 't': p[n] = '\t'; break;
+ case 'n': p[n] = '\n'; break;
+ case 'r': p[n] = '\r'; break;
+ case 'b': p[n] = '\b'; break;
+ case 'f': p[n] = '\f'; break;
+ default: p[n] = s[i]; break;
+ } // endswitch
+
+ n++;
+ } else
+ goto err;
+
+ break;
+ default:
+ p[n++] = s[i];
+ break;
+    }; // endswitch s[i]
+
+err:
+  throw("Unexpected EOF in String");
+} // end of ParseString
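+
+// Worked example of the \u handling above: "\u00e9" yields hex = 0xE9, which
+// falls in the [0x80, 0x800) range and is therefore stored as the two UTF-8
+// bytes 0xC3 0xA9; "\u0041" (< 0x80) is stored as the single byte 0x41 ('A').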
+
+/***********************************************************************/
+/* Parse a JSON numeric value. */
+/***********************************************************************/
+void BDOC::ParseNumeric(size_t& i, PBVAL vlp)
+{
+ char buf[50];
+ int n = 0;
+ short nd = 0;
+ bool has_dot = false;
+ bool has_e = false;
+ bool found_digit = false;
+
+ for (; i < len; i++) {
+ switch (s[i]) {
+ case '.':
+ if (!found_digit || has_dot || has_e)
+ goto err;
+
+ has_dot = true;
+ break;
+ case 'e':
+ case 'E':
+ if (!found_digit || has_e)
+ goto err;
+
+ has_e = true;
+ found_digit = false;
+ break;
+ case '+':
+ if (!has_e)
+ goto err;
+
+ // fall through
+ case '-':
+ if (found_digit)
+ goto err;
+
+ break;
+ default:
+ if (isdigit(s[i])) {
+ if (has_dot && !has_e)
+ nd++; // Number of decimals
+
+ found_digit = true;
+ } else
+ goto fin;
+
+ }; // endswitch s[i]
+
+ buf[n++] = s[i];
+ } // endfor i
+
+fin:
+ if (found_digit) {
+ buf[n] = 0;
+
+ if (has_dot || has_e) {
+ double dv = atof(buf);
+
+ if (nd >= 6 || dv > FLT_MAX || dv < FLT_MIN) {
+ double* dvp = (double*)PlugSubAlloc(G, NULL, sizeof(double));
+
+ *dvp = dv;
+ vlp->To_Val = MOF(dvp);
+ vlp->Type = TYPE_DBL;
+ } else {
+ vlp->F = (float)dv;
+ vlp->Type = TYPE_FLOAT;
+ } // endif nd
+
+ vlp->Nd = MY_MIN(nd, 16);
+ } else {
+ longlong iv = strtoll(buf, NULL, 10);
+
+ if (iv > INT_MAX32 || iv < INT_MIN32) {
+ longlong *llp = (longlong*)PlugSubAlloc(G, NULL, sizeof(longlong));
+
+ *llp = iv;
+ vlp->To_Val = MOF(llp);
+ vlp->Type = TYPE_BINT;
+ } else {
+ vlp->N = (int)iv;
+ vlp->Type = TYPE_INTG;
+ } // endif iv
+
+ } // endif has
+
+ i--; // Unstack following character
+ return;
+ } else
+ throw("No digit found");
+
+err:
+ throw("Unexpected EOF in number");
+} // end of ParseNumeric
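+
+// Worked examples of the storage choice above: "3.14" (2 decimals, in float
+// range) becomes TYPE_FLOAT with Nd = 2; "2.718281828" (9 decimals) gets a
+// sub-allocated double and becomes TYPE_DBL; "42" becomes TYPE_INTG, while
+// "9876543210" exceeds INT_MAX32 and is stored as TYPE_BINT.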
+
+/***********************************************************************/
+/* Serialize a BJSON document tree: */
+/***********************************************************************/
+PSZ BDOC::Serialize(PGLOBAL g, PBVAL bvp, char* fn, int pretty)
+{
+ PSZ str = NULL;
+ bool b = false, err = true;
+ FILE* fs = NULL;
+
+ G->Message[0] = 0;
+
+ try {
+ if (!bvp) {
+ strcpy(g->Message, "Null json tree");
+ throw 1;
+ } else if (!fn) {
+ // Serialize to a string
+ jp = new(g) JOUTSTR(g);
+ b = pretty == 1;
+ } else {
+ if (!(fs = fopen(fn, "wb"))) {
+ sprintf(g->Message, MSG(OPEN_MODE_ERROR),
+ "w", (int)errno, fn);
+ strcat(strcat(g->Message, ": "), strerror(errno));
+ throw 2;
+ } else if (pretty >= 2) {
+ // Serialize to a pretty file
+ jp = new(g)JOUTPRT(g, fs);
+ } else {
+ // Serialize to a flat file
+ b = true;
+ jp = new(g)JOUTFILE(g, fs, pretty);
+ } // endif's
+
+ } // endif's
+
+ switch (bvp->Type) {
+ case TYPE_JAR:
+ err = SerializeArray(bvp->To_Val, b);
+ break;
+ case TYPE_JOB:
+ err = ((b && jp->Prty()) && jp->WriteChr('\t'));
+ err |= SerializeObject(bvp->To_Val);
+ break;
+ case TYPE_JVAL:
+ err = SerializeValue(MVP(bvp->To_Val));
+ break;
+ default:
+ err = SerializeValue(bvp, true);
+ } // endswitch Type
+
+ if (fs) {
+ fputs(EL, fs);
+ fclose(fs);
+ str = (err) ? NULL : strcpy(g->Message, "Ok");
+ } else if (!err) {
+ str = ((JOUTSTR*)jp)->Strp;
+ jp->WriteChr('\0');
+ PlugSubAlloc(g, NULL, ((JOUTSTR*)jp)->N);
+ } else if (G->Message[0])
+ strcpy(g->Message, "Error in Serialize");
+ else
+ GetMsg(g);
+
+ } catch (int n) {
+ if (trace(1))
+ htrc("Exception %d: %s\n", n, G->Message);
+ GetMsg(g);
+ str = NULL;
+ } catch (const char* msg) {
+ strcpy(g->Message, msg);
+ str = NULL;
+ } // end catch
+
+ return str;
+} // end of Serialize
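+
+// Continuing the ParseJson sketch above, an in-memory round trip would be
+// (flat style, pretty == 0; pass a file name instead of NULL to write a file):
+//
+//   PSZ s = doc.Serialize(g, top, NULL, 0);
+//   if (s)
+//     htrc("%s\n", s);        // e.g. {"a":1,"b":[true,null]}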
+
+
+/***********************************************************************/
+/* Serialize a JSON Array. */
+/***********************************************************************/
+bool BDOC::SerializeArray(OFFSET arp, bool b)
+{
+ bool first = true;
+ PBVAL vp = MVP(arp);
+
+ if (b) {
+ if (jp->Prty()) {
+ if (jp->WriteChr('['))
+ return true;
+ else if (jp->Prty() == 1 && (jp->WriteStr(EL) || jp->WriteChr('\t')))
+ return true;
+
+ } // endif Prty
+
+ } else if (jp->WriteChr('['))
+ return true;
+
+  for (; vp; vp = MVP(vp->Next)) {
+ if (first)
+ first = false;
+ else if ((!b || jp->Prty()) && jp->WriteChr(','))
+ return true;
+ else if (b) {
+ if (jp->Prty() < 2 && jp->WriteStr(EL))
+ return true;
+ else if (jp->Prty() == 1 && jp->WriteChr('\t'))
+ return true;
+
+ } // endif b
+
+ if (SerializeValue(vp))
+ return true;
+
+ } // endfor vp
+
+ if (b && jp->Prty() == 1 && jp->WriteStr(EL))
+ return true;
+
+ return ((!b || jp->Prty()) && jp->WriteChr(']'));
+} // end of SerializeArray
+
+/***********************************************************************/
+/* Serialize a JSON Object. */
+/***********************************************************************/
+bool BDOC::SerializeObject(OFFSET obp)
+{
+ bool first = true;
+ PBPR prp = MPP(obp);
+
+ if (jp->WriteChr('{'))
+ return true;
+
+  for (; prp; prp = GetNext(prp)) {
+ if (first)
+ first = false;
+ else if (jp->WriteChr(','))
+ return true;
+
+ if (jp->WriteChr('"') ||
+ jp->WriteStr(MZP(prp->Key)) ||
+ jp->WriteChr('"') ||
+ jp->WriteChr(':') ||
+ SerializeValue(GetVlp(prp)))
+ return true;
+
+ } // endfor i
+
+ return jp->WriteChr('}');
+} // end of SerializeObject
+
+/***********************************************************************/
+/* Serialize a JSON Value. */
+/***********************************************************************/
+bool BDOC::SerializeValue(PBVAL jvp, bool b)
+{
+ char buf[64];
+
+ if (jvp) switch (jvp->Type) {
+ case TYPE_JAR:
+ return SerializeArray(jvp->To_Val, false);
+ case TYPE_JOB:
+ return SerializeObject(jvp->To_Val);
+ case TYPE_BOOL:
+ return jp->WriteStr(jvp->B ? "true" : "false");
+ case TYPE_STRG:
+ case TYPE_DTM:
+ if (b) {
+ return jp->WriteStr(MZP(jvp->To_Val));
+ } else
+ return jp->Escape(MZP(jvp->To_Val));
+
+ case TYPE_INTG:
+ sprintf(buf, "%d", jvp->N);
+ return jp->WriteStr(buf);
+ case TYPE_BINT:
+ sprintf(buf, "%lld", *(longlong*)MakePtr(Base, jvp->To_Val));
+ return jp->WriteStr(buf);
+ case TYPE_FLOAT:
+ sprintf(buf, "%.*f", jvp->Nd, jvp->F);
+ return jp->WriteStr(buf);
+ case TYPE_DBL:
+ sprintf(buf, "%.*lf", jvp->Nd, *(double*)MakePtr(Base, jvp->To_Val));
+ return jp->WriteStr(buf);
+ case TYPE_NULL:
+ return jp->WriteStr("null");
+ case TYPE_JVAL:
+ return SerializeValue(MVP(jvp->To_Val));
+ default:
+ return jp->WriteStr("???"); // TODO
+ } // endswitch Type
+
+ return jp->WriteStr("null");
+} // end of SerializeValue
+
+/* --------------------------- Class BJSON --------------------------- */
+
+/***********************************************************************/
+/* Program for sub-allocating Bjson structures. */
+/***********************************************************************/
+void* BJSON::BsonSubAlloc(size_t size)
+{
+ PPOOLHEADER pph; /* Points on area header. */
+ void* memp = G->Sarea;
+
+ size = ((size + 3) / 4) * 4; /* Round up size to multiple of 4 */
+ pph = (PPOOLHEADER)memp;
+
+ xtrc(16, "SubAlloc in %p size=%zd used=%zd free=%zd\n",
+ memp, size, pph->To_Free, pph->FreeBlk);
+
+ if (size > pph->FreeBlk) { /* Not enough memory left in pool */
+ sprintf(G->Message,
+ "Not enough memory for request of %zd (used=%zd free=%zd)",
+ size, pph->To_Free, pph->FreeBlk);
+ xtrc(1, "BsonSubAlloc: %s\n", G->Message);
+
+ if (Throw)
+ throw(1234);
+ else
+ return NULL;
+
+ } /* endif size OS32 code */
+
+ // Do the suballocation the simplest way
+ memp = MakePtr(memp, pph->To_Free); /* Points to suballocated block */
+ pph->To_Free += size; /* New offset of pool free block */
+ pph->FreeBlk -= size; /* New size of pool free block */
+ xtrc(16, "Done memp=%p used=%zd free=%zd\n",
+ memp, pph->To_Free, pph->FreeBlk);
+ return memp;
+} // end of BsonSubAlloc
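+
+// Sketch of the pool mechanics above: on a fresh area To_Free sits just past
+// the POOLHEADER; a request for 10 bytes is rounded up to 12, the caller gets
+// Sarea + To_Free, then To_Free grows and FreeBlk shrinks by 12. Blocks are
+// never freed one by one; SubSet()/MemSet() below reset the whole pool.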
+
+/*********************************************************************************/
+/* Program for sub-allocating a copy of a string in the memory pool.            */
+/*********************************************************************************/
+PSZ BJSON::NewStr(PSZ str)
+{
+ if (str) {
+ PSZ sm = (PSZ)BsonSubAlloc(strlen(str) + 1);
+
+ strcpy(sm, str);
+ return sm;
+ } else
+ return NULL;
+
+} // end of NewStr
+
+/*********************************************************************************/
+/* Program for SubSet re-initialization of the memory pool. */
+/*********************************************************************************/
+void BJSON::SubSet(bool b)
+{
+ PPOOLHEADER pph = (PPOOLHEADER)G->Sarea;
+
+ pph->To_Free = (G->Saved_Size) ? G->Saved_Size : sizeof(POOLHEADER);
+ pph->FreeBlk = G->Sarea_Size - pph->To_Free;
+
+ if (b)
+ G->Saved_Size = 0;
+
+} // end of SubSet
+
+/*********************************************************************************/
+/* Set the beginning of suballocations. */
+/*********************************************************************************/
+void BJSON::MemSet(size_t size)
+{
+ PPOOLHEADER pph = (PPOOLHEADER)G->Sarea;
+
+ pph->To_Free = size + sizeof(POOLHEADER);
+ pph->FreeBlk = G->Sarea_Size - pph->To_Free;
+} // end of MemSet
+
+ /* ------------------------ Bobject functions ------------------------ */
+
+/***********************************************************************/
+/* Set a pair vlp to some PVAL values. */
+/***********************************************************************/
+void BJSON::SetPairValue(PBPR brp, PBVAL bvp)
+{
+ if (bvp) {
+ brp->Vlp.To_Val = bvp->To_Val;
+ brp->Vlp.Nd = bvp->Nd;
+ brp->Vlp.Type = bvp->Type;
+ } else {
+ brp->Vlp.To_Val = 0;
+ brp->Vlp.Nd = 0;
+ brp->Vlp.Type = TYPE_NULL;
+ } // endif bvp
+
+} // end of SetPairValue
+
+ /***********************************************************************/
+/* Sub-allocate and initialize a BPAIR. */
+/***********************************************************************/
+PBPR BJSON::NewPair(OFFSET key, int type)
+{
+ PBPR bpp = (PBPR)BsonSubAlloc(sizeof(BPAIR));
+
+ bpp->Key = key;
+ bpp->Vlp.Type = type;
+ bpp->Vlp.To_Val = 0;
+ bpp->Vlp.Nd = 0;
+ bpp->Vlp.Next = 0;
+ return bpp;
+} // end of NewPair
+
+/***********************************************************************/
+/* Return the number of pairs in this object. */
+/***********************************************************************/
+int BJSON::GetObjectSize(PBVAL bop, bool b)
+{
+ CheckType(bop, TYPE_JOB);
+ int n = 0;
+
+ for (PBPR brp = GetObject(bop); brp; brp = GetNext(brp))
+ // If b return only non null pairs
+ if (!b || (brp->Vlp.To_Val && brp->Vlp.Type != TYPE_NULL))
+ n++;
+
+ return n;
+} // end of GetObjectSize
+
+/***********************************************************************/
+/* Add a new pair to an Object and return it. */
+/***********************************************************************/
+PBVAL BJSON::AddPair(PBVAL bop, PSZ key, int type)
+{
+ CheckType(bop, TYPE_JOB);
+ PBPR brp;
+ OFFSET nrp = NewPair(key, type);
+
+ if (bop->To_Val) {
+ for (brp = GetObject(bop); brp->Vlp.Next; brp = GetNext(brp));
+
+ brp->Vlp.Next = nrp;
+ } else
+ bop->To_Val = nrp;
+
+ bop->Nd++;
+ return GetVlp(MPP(nrp));
+} // end of AddPair
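+
+// Sketch: building {"id":1,"name":"foo"} with the helpers above (bjp being a
+// pointer to a BJSON constructed with an initialized PGLOBAL; names are
+// illustrative only):
+//
+//   PBVAL obj = bjp->NewVal(TYPE_JOB);
+//   bjp->SetInteger(bjp->AddPair(obj, (PSZ)"id"), 1);
+//   bjp->SetString(bjp->AddPair(obj, (PSZ)"name"), bjp->NewStr((PSZ)"foo"));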
+
+/***********************************************************************/
+/* Return all object keys as an array. */
+/***********************************************************************/
+PBVAL BJSON::GetKeyList(PBVAL bop)
+{
+ CheckType(bop, TYPE_JOB);
+ PBVAL arp = NewVal(TYPE_JAR);
+
+ for (PBPR brp = GetObject(bop); brp; brp = GetNext(brp))
+ AddArrayValue(arp, MOF(SubAllocVal(brp->Key, TYPE_STRG)));
+
+ return arp;
+} // end of GetKeyList
+
+/***********************************************************************/
+/* Return all object values as an array. */
+/***********************************************************************/
+PBVAL BJSON::GetObjectValList(PBVAL bop)
+{
+ CheckType(bop, TYPE_JOB);
+ PBVAL arp = NewVal(TYPE_JAR);
+
+ for (PBPR brp = GetObject(bop); brp; brp = GetNext(brp))
+ AddArrayValue(arp, DupVal(GetVlp(brp)));
+
+ return arp;
+} // end of GetObjectValList
+
+/***********************************************************************/
+/* Get the value corresponding to the given key. */
+/***********************************************************************/
+PBVAL BJSON::GetKeyValue(PBVAL bop, PSZ key)
+{
+ CheckType(bop, TYPE_JOB);
+
+ for (PBPR brp = GetObject(bop); brp; brp = GetNext(brp))
+ if (!strcmp(GetKey(brp), key))
+ return GetVlp(brp);
+
+ return NULL;
+} // end of GetKeyValue;
+
+/***********************************************************************/
+/* Return the text corresponding to all keys (XML like). */
+/***********************************************************************/
+PSZ BJSON::GetObjectText(PGLOBAL g, PBVAL bop, PSTRG text)
+{
+ CheckType(bop, TYPE_JOB);
+ PBPR brp = GetObject(bop);
+
+ if (brp) {
+ bool b;
+
+ if (!text) {
+ text = new(g) STRING(g, 256);
+ b = true;
+ } else {
+ if (text->GetLastChar() != ' ')
+ text->Append(' ');
+
+ b = false;
+ } // endif text
+
+ if (b && !brp->Vlp.Next && !strcmp(MZP(brp->Key), "$date")) {
+ int i;
+ PSZ s;
+
+ GetValueText(g, GetVlp(brp), text);
+ s = text->GetStr();
+ i = (s[1] == '-' ? 2 : 1);
+
+ if (IsNum(s + i)) {
+ // Date is in milliseconds
+ int j = text->GetLength();
+
+ if (j >= 4 + i) {
+ s[j - 3] = 0; // Change it to seconds
+ text->SetLength((uint)strlen(s));
+ } else
+ text->Set(" 0");
+
+ } // endif text
+
+ } else for (; brp; brp = GetNext(brp)) {
+ GetValueText(g, GetVlp(brp), text);
+
+ if (brp->Vlp.Next)
+ text->Append(' ');
+
+ } // endfor brp
+
+ if (b) {
+ text->Trim();
+ return text->GetStr();
+ } // endif b
+
+ } // endif bop
+
+ return NULL;
+} // end of GetObjectText;
+
+/***********************************************************************/
+/* Set or add a value corresponding to the given key. */
+/***********************************************************************/
+void BJSON::SetKeyValue(PBVAL bop, OFFSET bvp, PSZ key)
+{
+ CheckType(bop, TYPE_JOB);
+ PBPR brp, prp = NULL;
+
+ if (bop->To_Val) {
+ for (brp = GetObject(bop); brp; brp = GetNext(brp))
+ if (!strcmp(GetKey(brp), key))
+ break;
+ else
+ prp = brp;
+
+ if (!brp)
+ brp = MPP(prp->Vlp.Next = NewPair(key));
+
+ } else
+ brp = MPP(bop->To_Val = NewPair(key));
+
+ SetPairValue(brp, MVP(bvp));
+ bop->Nd++;
+} // end of SetKeyValue
+
+/***********************************************************************/
+/* Merge two objects. */
+/***********************************************************************/
+PBVAL BJSON::MergeObject(PBVAL bop1, PBVAL bop2)
+{
+ CheckType(bop1, TYPE_JOB);
+ CheckType(bop2, TYPE_JOB);
+
+ if (bop1->To_Val)
+ for (PBPR brp = GetObject(bop2); brp; brp = GetNext(brp))
+ SetKeyValue(bop1, GetVlp(brp), GetKey(brp));
+
+ else {
+ bop1->To_Val = bop2->To_Val;
+ bop1->Nd = bop2->Nd;
+ } // endelse To_Val
+
+ return bop1;
+} // end of MergeObject;
+
+/***********************************************************************/
+/* Delete a value corresponding to the given key. */
+/***********************************************************************/
+bool BJSON::DeleteKey(PBVAL bop, PCSZ key)
+{
+ CheckType(bop, TYPE_JOB);
+ PBPR brp, pbrp = NULL;
+
+ for (brp = GetObject(bop); brp; brp = GetNext(brp))
+ if (!strcmp(MZP(brp->Key), key)) {
+ if (pbrp) {
+ pbrp->Vlp.Next = brp->Vlp.Next;
+ } else
+ bop->To_Val = brp->Vlp.Next;
+
+ bop->Nd--;
+      return true;
+ } else
+ pbrp = brp;
+
+ return false;
+} // end of DeleteKey
+
+/***********************************************************************/
+/* True if void or if all members are nulls. */
+/***********************************************************************/
+bool BJSON::IsObjectNull(PBVAL bop)
+{
+ CheckType(bop, TYPE_JOB);
+
+ for (PBPR brp = GetObject(bop); brp; brp = GetNext(brp))
+ if (brp->Vlp.To_Val && brp->Vlp.Type != TYPE_NULL)
+ return false;
+
+ return true;
+} // end of IsObjectNull
+
+/* ------------------------- Barray functions ------------------------ */
+
+/***********************************************************************/
+/* Return the number of values in this object. */
+/***********************************************************************/
+int BJSON::GetArraySize(PBVAL bap, bool b)
+{
+ CheckType(bap, TYPE_JAR);
+ int n = 0;
+
+ for (PBVAL bvp = GetArray(bap); bvp; bvp = GetNext(bvp))
+ // If b, return only non null values
+ if (!b || bvp->Type != TYPE_NULL)
+ n++;
+
+ return n;
+} // end of GetArraySize
+
+/***********************************************************************/
+/* Get the Nth value of an Array. */
+/***********************************************************************/
+PBVAL BJSON::GetArrayValue(PBVAL bap, int n)
+{
+ CheckType(bap, TYPE_JAR);
+ int i = 0;
+
+ for (PBVAL bvp = GetArray(bap); bvp; bvp = GetNext(bvp), i++)
+ if (i == n)
+ return bvp;
+
+ return NULL;
+} // end of GetArrayValue
+
+/***********************************************************************/
+/* Add a Value to the Array Value list. */
+/***********************************************************************/
+void BJSON::AddArrayValue(PBVAL bap, OFFSET nbv, int* x)
+{
+ CheckType(bap, TYPE_JAR);
+ int i = 0;
+ PBVAL bvp, lbp = NULL;
+
+ if (!nbv)
+ nbv = MOF(NewVal());
+
+ for (bvp = GetArray(bap); bvp; bvp = GetNext(bvp), i++)
+ if (x && i == *x)
+ break;
+ else
+ lbp = bvp;
+
+ if (lbp) {
+ MVP(nbv)->Next = lbp->Next;
+ lbp->Next = nbv;
+ } else {
+ MVP(nbv)->Next = bap->To_Val;
+ bap->To_Val = nbv;
+ } // endif lbp
+
+ bap->Nd++;
+} // end of AddArrayValue
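+
+// Sketch: appending versus positional insertion with the function above
+// (bjp as in the object sketch; values built with NewVal/SetInteger):
+//
+//   PBVAL arr = bjp->NewVal(TYPE_JAR);
+//   PBVAL one = bjp->NewVal(); bjp->SetInteger(one, 1);
+//   bjp->AddArrayValue(arr, one);              // appended at the end
+//   int x = 0;
+//   PBVAL two = bjp->NewVal(); bjp->SetInteger(two, 2);
+//   bjp->AddArrayValue(arr, two, &x);          // inserted first: arr is [2,1]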
+
+/***********************************************************************/
+/* Merge two arrays. */
+/***********************************************************************/
+void BJSON::MergeArray(PBVAL bap1, PBVAL bap2)
+{
+ CheckType(bap1, TYPE_JAR);
+ CheckType(bap2, TYPE_JAR);
+
+ if (bap1->To_Val) {
+ for (PBVAL bvp = GetArray(bap2); bvp; bvp = GetNext(bvp))
+ AddArrayValue(bap1, MOF(DupVal(bvp)));
+
+ } else {
+ bap1->To_Val = bap2->To_Val;
+ bap1->Nd = bap2->Nd;
+ } // endif To_Val
+
+} // end of MergeArray
+
+/***********************************************************************/
+/* Set the nth Value of the Array Value list or add it. */
+/***********************************************************************/
+void BJSON::SetArrayValue(PBVAL bap, PBVAL nvp, int n)
+{
+ CheckType(bap, TYPE_JAR);
+ int i = 0;
+ PBVAL bvp = NULL;
+
+ if (bap->To_Val)
+ for (bvp = GetArray(bap); bvp; i++, bvp = GetNext(bvp))
+ if (i == n) {
+ SetValueVal(bvp, nvp);
+ return;
+ }
+
+ if (!bvp)
+ AddArrayValue(bap, MOF(nvp));
+
+} // end of SetArrayValue
+
+/***********************************************************************/
+/* Return the text corresponding to all values. */
+/***********************************************************************/
+PSZ BJSON::GetArrayText(PGLOBAL g, PBVAL bap, PSTRG text)
+{
+ CheckType(bap, TYPE_JAR);
+
+ if (bap->To_Val) {
+ bool b;
+
+ if (!text) {
+ text = new(g) STRING(g, 256);
+ b = true;
+ } else {
+ if (text->GetLastChar() != ' ')
+ text->Append(" (");
+ else
+ text->Append('(');
+
+ b = false;
+ } // endif text
+
+ for (PBVAL bvp = GetArray(bap); bvp; bvp = GetNext(bvp)) {
+ GetValueText(g, bvp, text);
+
+ if (bvp->Next)
+ text->Append(", ");
+ else if (!b)
+ text->Append(')');
+
+ } // endfor bvp
+
+ if (b) {
+ text->Trim();
+ return text->GetStr();
+ } // endif b
+
+ } // endif To_Val
+
+ return NULL;
+} // end of GetArrayText
+
+/***********************************************************************/
+/* Delete a Value from the Arrays Value list. */
+/***********************************************************************/
+bool BJSON::DeleteValue(PBVAL bap, int n)
+{
+ CheckType(bap, TYPE_JAR);
+ int i = 0;
+ PBVAL bvp, pvp = NULL;
+
+ for (bvp = GetArray(bap); bvp; i++, bvp = GetNext(bvp))
+ if (i == n) {
+ if (pvp)
+ pvp->Next = bvp->Next;
+ else
+ bap->To_Val = bvp->Next;
+
+ bap->Nd--;
+      return true;
+ } else
+ pvp = bvp;
+
+ return false;
+} // end of DeleteValue
+
+/***********************************************************************/
+/* True if void or if all members are nulls. */
+/***********************************************************************/
+bool BJSON::IsArrayNull(PBVAL bap)
+{
+ CheckType(bap, TYPE_JAR);
+
+ for (PBVAL bvp = GetArray(bap); bvp; bvp = GetNext(bvp))
+ if (bvp->Type != TYPE_NULL)
+ return false;
+
+ return true;
+} // end of IsArrayNull
+
+/* ------------------------- Bvalue functions ------------------------ */
+
+/***********************************************************************/
+/* Sub-allocate and clear a BVAL. */
+/***********************************************************************/
+PBVAL BJSON::NewVal(int type)
+{
+ PBVAL bvp = (PBVAL)BsonSubAlloc(sizeof(BVAL));
+
+ bvp->To_Val = 0;
+ bvp->Nd = 0;
+ bvp->Type = type;
+ bvp->Next = 0;
+ return bvp;
+} // end of NewVal
+
+/***********************************************************************/
+/* Sub-allocate and initialize a BVAL as type. */
+/***********************************************************************/
+PBVAL BJSON::SubAllocVal(OFFSET toval, int type, short nd)
+{
+ PBVAL bvp = NewVal(type);
+
+ bvp->To_Val = toval;
+ bvp->Nd = nd;
+ return bvp;
+} // end of SubAllocVal
+
+/***********************************************************************/
+/* Sub-allocate and initialize a BVAL as string. */
+/***********************************************************************/
+PBVAL BJSON::SubAllocStr(OFFSET toval, short nd)
+{
+ PBVAL bvp = NewVal(TYPE_STRG);
+
+ bvp->To_Val = toval;
+ bvp->Nd = nd;
+ return bvp;
+} // end of SubAllocStr
+
+/***********************************************************************/
+/* Allocate a BVALUE with a given string or numeric value. */
+/***********************************************************************/
+PBVAL BJSON::NewVal(PVAL valp)
+{
+ PBVAL vlp = NewVal();
+
+ SetValue(vlp, valp);
+ return vlp;
+} // end of NewVal
+
+/***********************************************************************/
+/* Sub-allocate and initialize a BVAL from another BVAL. */
+/***********************************************************************/
+PBVAL BJSON::DupVal(PBVAL bvlp) {
+ PBVAL bvp = NewVal();
+
+ *bvp = *bvlp;
+ bvp->Next = 0;
+ return bvp;
+} // end of DupVal
+
+/***********************************************************************/
+/* Return the size of value's value. */
+/***********************************************************************/
+int BJSON::GetSize(PBVAL vlp, bool b)
+{
+ switch (vlp->Type) {
+ case TYPE_JAR:
+ return GetArraySize(vlp);
+ case TYPE_JOB:
+ return GetObjectSize(vlp);
+ default:
+ return 1;
+ } // enswitch Type
+
+} // end of GetSize
+
+PBVAL BJSON::GetBson(PBVAL bvp)
+{
+ PBVAL bp = NULL;
+
+ switch (bvp->Type) {
+ case TYPE_JAR:
+ bp = MVP(bvp->To_Val);
+ break;
+ case TYPE_JOB:
+ bp = GetVlp(MPP(bvp->To_Val));
+ break;
+ default:
+ bp = bvp;
+ break;
+ } // endswitch Type
+
+ return bp;
+} // end of GetBson
+
+/***********************************************************************/
+/* Return the Value's as a Value struct. */
+/***********************************************************************/
+PVAL BJSON::GetValue(PGLOBAL g, PBVAL vp)
+{
+ double d;
+ PVAL valp;
+ PBVAL vlp = vp->Type == TYPE_JVAL ? MVP(vp->To_Val) : vp;
+
+ switch (vlp->Type) {
+ case TYPE_STRG:
+ case TYPE_DBL:
+ case TYPE_BINT:
+ valp = AllocateValue(g, MP(vlp->To_Val), vlp->Type, vlp->Nd);
+ break;
+ case TYPE_INTG:
+ case TYPE_BOOL:
+ valp = AllocateValue(g, vlp, vlp->Type);
+ break;
+ case TYPE_FLOAT:
+ d = (double)vlp->F;
+ valp = AllocateValue(g, &d, TYPE_DOUBLE, vlp->Nd);
+ break;
+ default:
+ valp = NULL;
+ break;
+ } // endswitch Type
+
+ return valp;
+} // end of GetValue
+
+/***********************************************************************/
+/* Return the Value's Integer value. */
+/***********************************************************************/
+int BJSON::GetInteger(PBVAL vp) {
+ int n;
+ PBVAL vlp = (vp->Type == TYPE_JVAL) ? MVP(vp->To_Val) : vp;
+
+ switch (vlp->Type) {
+ case TYPE_INTG:
+ n = vlp->N;
+ break;
+ case TYPE_FLOAT:
+ n = (int)vlp->F;
+ break;
+ case TYPE_DTM:
+ case TYPE_STRG:
+ n = atoi(MZP(vlp->To_Val));
+ break;
+ case TYPE_BOOL:
+ n = (vlp->B) ? 1 : 0;
+ break;
+ case TYPE_BINT:
+ n = (int)*(longlong*)MP(vlp->To_Val);
+ break;
+ case TYPE_DBL:
+ n = (int)*(double*)MP(vlp->To_Val);
+ break;
+ default:
+ n = 0;
+ } // endswitch Type
+
+ return n;
+} // end of GetInteger
+
+/***********************************************************************/
+/* Return the Value's Big integer value. */
+/***********************************************************************/
+longlong BJSON::GetBigint(PBVAL vp) {
+ longlong lln;
+ PBVAL vlp = (vp->Type == TYPE_JVAL) ? MVP(vp->To_Val) : vp;
+
+ switch (vlp->Type) {
+ case TYPE_BINT:
+ lln = *(longlong*)MP(vlp->To_Val);
+ break;
+ case TYPE_INTG:
+ lln = (longlong)vlp->N;
+ break;
+ case TYPE_FLOAT:
+ lln = (longlong)vlp->F;
+ break;
+ case TYPE_DBL:
+ lln = (longlong)*(double*)MP(vlp->To_Val);
+ break;
+ case TYPE_DTM:
+ case TYPE_STRG:
+ lln = atoll(MZP(vlp->To_Val));
+ break;
+ case TYPE_BOOL:
+ lln = (vlp->B) ? 1 : 0;
+ break;
+ default:
+ lln = 0;
+ } // endswitch Type
+
+ return lln;
+} // end of GetBigint
+
+/***********************************************************************/
+/* Return the Value's Double value. */
+/***********************************************************************/
+double BJSON::GetDouble(PBVAL vp)
+{
+ double d;
+ PBVAL vlp = (vp->Type == TYPE_JVAL) ? MVP(vp->To_Val) : vp;
+
+ switch (vlp->Type) {
+ case TYPE_DBL:
+ d = *(double*)MP(vlp->To_Val);
+ break;
+ case TYPE_BINT:
+ d = (double)*(longlong*)MP(vlp->To_Val);
+ break;
+ case TYPE_INTG:
+ d = (double)vlp->N;
+ break;
+ case TYPE_FLOAT:
+ d = (double)vlp->F;
+ break;
+ case TYPE_DTM:
+ case TYPE_STRG:
+ d = atof(MZP(vlp->To_Val));
+ break;
+ case TYPE_BOOL:
+ d = (vlp->B) ? 1.0 : 0.0;
+ break;
+ default:
+ d = 0.0;
+ } // endswitch Type
+
+ return d;
+} // end of GetDouble
+
+/***********************************************************************/
+/* Return the Value's String value. */
+/***********************************************************************/
+PSZ BJSON::GetString(PBVAL vp, char* buff)
+{
+ char buf[32];
+ char* p = (buff) ? buff : buf;
+ PBVAL vlp = (vp->Type == TYPE_JVAL) ? MVP(vp->To_Val) : vp;
+
+ switch (vlp->Type) {
+ case TYPE_DTM:
+ case TYPE_STRG:
+ p = MZP(vlp->To_Val);
+ break;
+ case TYPE_INTG:
+ sprintf(p, "%d", vlp->N);
+ break;
+ case TYPE_FLOAT:
+ sprintf(p, "%.*f", vlp->Nd, vlp->F);
+ break;
+ case TYPE_BINT:
+ sprintf(p, "%lld", *(longlong*)MP(vlp->To_Val));
+ break;
+ case TYPE_DBL:
+ sprintf(p, "%.*lf", vlp->Nd, *(double*)MP(vlp->To_Val));
+ break;
+ case TYPE_BOOL:
+ p = (PSZ)((vlp->B) ? "true" : "false");
+ break;
+ case TYPE_NULL:
+ p = (PSZ)"null";
+ break;
+ default:
+ p = NULL;
+ } // endswitch Type
+
+ return (p == buf) ? (PSZ)PlugDup(G, buf) : p;
+} // end of GetString
+
+/***********************************************************************/
+/* Return the Value's String value. */
+/***********************************************************************/
+PSZ BJSON::GetValueText(PGLOBAL g, PBVAL vlp, PSTRG text)
+{
+ if (vlp->Type == TYPE_JOB)
+ return GetObjectText(g, vlp, text);
+ else if (vlp->Type == TYPE_JAR)
+ return GetArrayText(g, vlp, text);
+
+ char buff[32];
+ PSZ s = (vlp->Type == TYPE_NULL) ? NULL : GetString(vlp, buff);
+
+ if (s)
+ text->Append(s);
+ else if (GetJsonNull())
+ text->Append(GetJsonNull());
+
+ return NULL;
+} // end of GetValueText
+
+void BJSON::SetValueObj(PBVAL vlp, PBVAL bop)
+{
+ CheckType(bop, TYPE_JOB);
+ vlp->To_Val = bop->To_Val;
+ vlp->Nd = bop->Nd;
+ vlp->Type = TYPE_JOB;
+} // end of SetValueObj;
+
+void BJSON::SetValueArr(PBVAL vlp, PBVAL bap)
+{
+ CheckType(bap, TYPE_JAR);
+ vlp->To_Val = bap->To_Val;
+ vlp->Nd = bap->Nd;
+ vlp->Type = TYPE_JAR;
+} // end of SetValueArr
+
+void BJSON::SetValueVal(PBVAL vlp, PBVAL vp)
+{
+ vlp->To_Val = vp->To_Val;
+ vlp->Nd = vp->Nd;
+ vlp->Type = vp->Type;
+} // end of SetValueVal
+
+PBVAL BJSON::SetValue(PBVAL vlp, PVAL valp)
+{
+ if (!vlp)
+ vlp = NewVal();
+
+ if (!valp || valp->IsNull()) {
+ vlp->Type = TYPE_NULL;
+ } else switch (valp->GetType()) {
+ case TYPE_DATE:
+ if (((DTVAL*)valp)->IsFormatted())
+ vlp->To_Val = DupStr(valp->GetCharValue());
+ else {
+ char buf[32];
+
+ vlp->To_Val = DupStr(valp->GetCharString(buf));
+ } // endif Formatted
+
+ vlp->Type = TYPE_DTM;
+ break;
+ case TYPE_STRING:
+ vlp->To_Val = DupStr(valp->GetCharValue());
+ vlp->Type = TYPE_STRG;
+ break;
+ case TYPE_DOUBLE:
+ case TYPE_DECIM:
+ { double d = valp->GetFloatValue();
+ int nd = (IsTypeNum(valp->GetType())) ? valp->GetValPrec() : 0;
+
+ if (nd > 0 && nd <= 6 && d >= FLT_MIN && d <= FLT_MAX) {
+ vlp->F = (float)valp->GetFloatValue();
+ vlp->Type = TYPE_FLOAT;
+ } else {
+ double* dp = (double*)BsonSubAlloc(sizeof(double));
+
+ *dp = d;
+ vlp->To_Val = MOF(dp);
+ vlp->Type = TYPE_DBL;
+ } // endif Nd
+
+ vlp->Nd = MY_MIN(nd, 16);
+ } break;
+ case TYPE_TINY:
+ vlp->B = valp->GetTinyValue() != 0;
+ vlp->Type = TYPE_BOOL;
+ break;
+ case TYPE_INT:
+ vlp->N = valp->GetIntValue();
+ vlp->Type = TYPE_INTG;
+ break;
+ case TYPE_BIGINT:
+ if (valp->GetBigintValue() >= INT_MIN32 &&
+ valp->GetBigintValue() <= INT_MAX32) {
+ vlp->N = valp->GetIntValue();
+ vlp->Type = TYPE_INTG;
+ } else {
+ longlong* llp = (longlong*)BsonSubAlloc(sizeof(longlong));
+
+ *llp = valp->GetBigintValue();
+ vlp->To_Val = MOF(llp);
+ vlp->Type = TYPE_BINT;
+ } // endif BigintValue
+
+ break;
+ default:
+    sprintf(G->Message, "Unsupported type %d\n", valp->GetType());
+ throw(777);
+ } // endswitch Type
+
+ return vlp;
+} // end of SetValue
+
+/***********************************************************************/
+/* Set the Value's value as the given integer. */
+/***********************************************************************/
+void BJSON::SetInteger(PBVAL vlp, int n)
+{
+ vlp->N = n;
+ vlp->Type = TYPE_INTG;
+} // end of SetInteger
+
+/***********************************************************************/
+/* Set the Value's Boolean value as a tiny integer. */
+/***********************************************************************/
+void BJSON::SetBool(PBVAL vlp, bool b)
+{
+ vlp->B = b;
+ vlp->Type = TYPE_BOOL;
+} // end of SetBool
+
+/***********************************************************************/
+/* Set the Value's value as the given big integer. */
+/***********************************************************************/
+void BJSON::SetBigint(PBVAL vlp, longlong ll)
+{
+ if (ll >= INT_MIN32 && ll <= INT_MAX32) {
+ vlp->N = (int)ll;
+ vlp->Type = TYPE_INTG;
+ } else {
+ longlong* llp = (longlong*)PlugSubAlloc(G, NULL, sizeof(longlong));
+
+ *llp = ll;
+ vlp->To_Val = MOF(llp);
+ vlp->Type = TYPE_BINT;
+ } // endif ll
+
+} // end of SetBigint
+
+/***********************************************************************/
+/* Set the Value's value as the given DOUBLE. */
+/***********************************************************************/
+void BJSON::SetFloat(PBVAL vlp, double d, int prec)
+{
+ int nd = MY_MIN((prec < 0) ? GetJsonDefPrec() : prec, 16);
+
+ if (nd < 6 && d >= FLT_MIN && d <= FLT_MAX) {
+ vlp->F = (float)d;
+ vlp->Type = TYPE_FLOAT;
+ } else {
+ double* dp = (double*)BsonSubAlloc(sizeof(double));
+
+ *dp = d;
+ vlp->To_Val = MOF(dp);
+ vlp->Type = TYPE_DBL;
+ } // endif nd
+
+ vlp->Nd = nd;
+} // end of SetFloat
+
+/***********************************************************************/
+/* Set the Value's value as the given DOUBLE representation. */
+/***********************************************************************/
+void BJSON::SetFloat(PBVAL vlp, PSZ s)
+{
+ char *p = strchr(s, '.');
+ int nd = 0;
+ double d = atof(s);
+
+ if (p) {
+ for (++p; isdigit(*p); nd++, p++);
+ for (--p; *p == '0'; nd--, p--);
+ } // endif p
+
+ SetFloat(vlp, d, nd);
+} // end of SetFloat
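+
+// e.g. "12.3400": four digits follow the dot but the two trailing zeros are
+// discounted, so the value is stored with Nd = 2.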
+
+ /***********************************************************************/
+/* Set the Value's value as the given string. */
+/***********************************************************************/
+void BJSON::SetString(PBVAL vlp, PSZ s, int ci)
+{
+ vlp->To_Val = MOF(s);
+ vlp->Nd = ci;
+ vlp->Type = TYPE_STRG;
+} // end of SetString
+
+/***********************************************************************/
+/* True when its JSON or normal value is null. */
+/***********************************************************************/
+bool BJSON::IsValueNull(PBVAL vlp)
+{
+ bool b;
+
+ switch (vlp->Type) {
+ case TYPE_NULL:
+ b = true;
+ break;
+ case TYPE_JOB:
+ b = IsObjectNull(vlp);
+ break;
+ case TYPE_JAR:
+ b = IsArrayNull(vlp);
+ break;
+ default:
+ b = false;
+ } // endswitch Type
+
+ return b;
+} // end of IsValueNull
diff --git a/storage/connect/bson.h b/storage/connect/bson.h
new file mode 100644
index 00000000000..acc36e8e0ed
--- /dev/null
+++ b/storage/connect/bson.h
@@ -0,0 +1,207 @@
+/**************** bson H Declares Source Code File (.H) ****************/
+/* Name: bson.h Version 1.0 */
+/* */
+/* (C) Copyright to the author Olivier BERTRAND 2020 */
+/* */
+/* This file contains the BSON class declarations.                    */
+/***********************************************************************/
+#pragma once
+#include <mysql_com.h>
+#include "json.h"
+#include "xobject.h"
+
+#if defined(_DEBUG)
+#define X assert(false);
+#else
+#define X
+#endif
+
+#define ARGS MY_MIN(24,(int)len-i),s+MY_MAX(i-3,0)
+
+class BDOC;
+class BOUT;
+class BJSON;
+
+typedef class BDOC* PBDOC;
+typedef class BJSON* PBJSON;
+typedef uint OFFSET;
+
+/***********************************************************************/
+/* Structure BVAL. Binary representation of a JVALUE. */
+/***********************************************************************/
+typedef struct _jvalue {
+ union {
+ OFFSET To_Val; // Offset to a value
+ int N; // An integer value
+ float F; // A float value
+ bool B; // A boolean value True or false (0)
+ };
+ short Nd; // Number of decimals
+ short Type; // The value type
+ OFFSET Next; // Offset to the next value in array
+} BVAL, *PBVAL; // end of struct BVALUE
+
+/***********************************************************************/
+/* Structure BPAIR. The pairs of a json Object. */
+/***********************************************************************/
+typedef struct _jpair {
+ OFFSET Key; // Offset to this pair key name
+ BVAL Vlp; // The value of the pair
+} BPAIR, *PBPR; // end of struct BPAIR
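+
+// Note: values reference each other through OFFSETs into the sub-allocation
+// area rather than raw pointers, so a whole document is one relocatable flat
+// block; the MOF()/MVP()/MPP() helpers of class BJSON below convert between
+// the two forms, e.g. the array walks boil down to
+//   for (PBVAL vp = MVP(bap->To_Val); vp; vp = MVP(vp->Next)) { ... }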
+
+char* NextChr(PSZ s, char sep);
+char* GetJsonNull(void);
+const char* GetFmt(int type, bool un);
+
+DllExport bool IsNum(PSZ s);
+
+/***********************************************************************/
+/* Class BJSON. The class handling all BJSON operations. */
+/***********************************************************************/
+class BJSON : public BLOCK {
+public:
+ // Constructor
+ BJSON(PGLOBAL g, PBVAL vp = NULL)
+ { G = g, Base = G->Sarea; Bvp = vp; Throw = true; }
+
+ // Utility functions
+ inline OFFSET MOF(void *p) {return MakeOff(Base, p);}
+ inline void *MP(OFFSET o) {return MakePtr(Base, o);}
+ inline PBPR MPP(OFFSET o) {return (PBPR)MakePtr(Base, o);}
+ inline PBVAL MVP(OFFSET o) {return (PBVAL)MakePtr(Base, o);}
+ inline PSZ MZP(OFFSET o) {return (PSZ)MakePtr(Base, o);}
+ inline longlong LLN(OFFSET o) {return *(longlong*)MakePtr(Base, o);}
+ inline double DBL(OFFSET o) {return *(double*)MakePtr(Base, o);}
+
+ void Reset(void) {Base = G->Sarea;}
+ void* GetBase(void) { return Base; }
+ void SubSet(bool b = false);
+ void MemSave(void) {G->Saved_Size = ((PPOOLHEADER)G->Sarea)->To_Free;}
+ void MemSet(size_t size);
+ void GetMsg(PGLOBAL g) { if (g != G) strcpy(g->Message, G->Message); }
+
+ // SubAlloc functions
+ void* BsonSubAlloc(size_t size);
+ PBPR NewPair(OFFSET key, int type = TYPE_NULL);
+ OFFSET NewPair(PSZ key, int type = TYPE_NULL)
+ {return MOF(NewPair(DupStr(key), type));}
+ PBVAL NewVal(int type = TYPE_NULL);
+ PBVAL NewVal(PVAL valp);
+ PBVAL SubAllocVal(OFFSET toval, int type = TYPE_NULL, short nd = 0);
+ PBVAL SubAllocVal(PBVAL toval, int type = TYPE_NULL, short nd = 0)
+ {return SubAllocVal(MOF(toval), type, nd);}
+ PBVAL SubAllocStr(OFFSET str, short nd = 0);
+ PBVAL SubAllocStr(PSZ str, short nd = 0)
+ {return SubAllocStr(DupStr(str), nd);}
+ PBVAL DupVal(PBVAL bvp);
+ OFFSET DupStr(PSZ str) { return MOF(NewStr(str)); }
+ PSZ NewStr(PSZ str);
+
+ // Array functions
+ inline PBVAL GetArray(PBVAL vlp) {return MVP(vlp->To_Val);}
+ int GetArraySize(PBVAL bap, bool b = false);
+ PBVAL GetArrayValue(PBVAL bap, int i);
+ PSZ GetArrayText(PGLOBAL g, PBVAL bap, PSTRG text);
+ void MergeArray(PBVAL bap1,PBVAL bap2);
+ bool DeleteValue(PBVAL bap, int n);
+ void AddArrayValue(PBVAL bap, OFFSET nvp = 0, int* x = NULL);
+ inline void AddArrayValue(PBVAL bap, PBVAL nvp = NULL, int* x = NULL)
+ {AddArrayValue(bap, MOF(nvp), x);}
+ void SetArrayValue(PBVAL bap, PBVAL nvp, int n);
+ bool IsArrayNull(PBVAL bap);
+
+ // Object functions
+ inline PBPR GetObject(PBVAL bop) {return MPP(bop->To_Val);}
+ inline PBPR GetNext(PBPR brp) { return MPP(brp->Vlp.Next); }
+ void SetPairValue(PBPR brp, PBVAL bvp);
+ int GetObjectSize(PBVAL bop, bool b = false);
+ PSZ GetObjectText(PGLOBAL g, PBVAL bop, PSTRG text);
+ PBVAL MergeObject(PBVAL bop1, PBVAL bop2);
+ PBVAL AddPair(PBVAL bop, PSZ key, int type = TYPE_NULL);
+ PSZ GetKey(PBPR prp) {return prp ? MZP(prp->Key) : NULL;}
+ PBVAL GetTo_Val(PBPR prp) {return prp ? MVP(prp->Vlp.To_Val) : NULL;}
+ PBVAL GetVlp(PBPR prp) {return prp ? (PBVAL)&prp->Vlp : NULL;}
+ PBVAL GetKeyValue(PBVAL bop, PSZ key);
+ PBVAL GetKeyList(PBVAL bop);
+ PBVAL GetObjectValList(PBVAL bop);
+ void SetKeyValue(PBVAL bop, OFFSET bvp, PSZ key);
+ inline void SetKeyValue(PBVAL bop, PBVAL vlp, PSZ key)
+ {SetKeyValue(bop, MOF(vlp), key);}
+ bool DeleteKey(PBVAL bop, PCSZ k);
+ bool IsObjectNull(PBVAL bop);
+
+ // Value functions
+ int GetSize(PBVAL vlp, bool b = false);
+ PBVAL GetNext(PBVAL vlp) {return MVP(vlp->Next);}
+ //PJSON GetJsp(void) { return (DataType == TYPE_JSON ? Jsp : NULL); }
+ PSZ GetValueText(PGLOBAL g, PBVAL vlp, PSTRG text);
+ PBVAL GetBson(PBVAL bvp);
+ PSZ GetString(PBVAL vp, char* buff = NULL);
+ int GetInteger(PBVAL vp);
+ long long GetBigint(PBVAL vp);
+ double GetDouble(PBVAL vp);
+ PVAL GetValue(PGLOBAL g, PBVAL vp);
+ void SetValueObj(PBVAL vlp, PBVAL bop);
+ void SetValueArr(PBVAL vlp, PBVAL bap);
+ void SetValueVal(PBVAL vlp, PBVAL vp);
+ PBVAL SetValue(PBVAL vlp, PVAL valp);
+ void SetString(PBVAL vlp, PSZ s, int ci = 0);
+ void SetInteger(PBVAL vlp, int n);
+ void SetBigint(PBVAL vlp, longlong ll);
+ void SetFloat(PBVAL vlp, double f, int nd = -1);
+ void SetFloat(PBVAL vlp, PSZ s);
+ void SetBool(PBVAL vlp, bool b);
+ void Clear(PBVAL vlp) { vlp->N = 0; vlp->Nd = 0; vlp->Next = 0; }
+ bool IsValueNull(PBVAL vlp);
+ bool IsJson(PBVAL vlp) {return vlp ? vlp->Type == TYPE_JAR ||
+ vlp->Type == TYPE_JOB ||
+ vlp->Type == TYPE_JVAL : false;}
+
+ // Members
+ PGLOBAL G;
+ PBVAL Bvp;
+ void *Base;
+ bool Throw;
+
+protected:
+ // Default constructor not to be used
+ BJSON(void) {}
+}; // end of class BJSON
+
+/***********************************************************************/
+/* Class BDOC. The class for parsing and serializing JSON documents.  */
+/***********************************************************************/
+class BDOC : public BJSON {
+public:
+ BDOC(PGLOBAL G);
+
+ bool GetComma(void) { return comma; }
+ int GetPretty(void) { return pretty; }
+ void SetPretty(int pty) { pretty = pty; }
+
+ // Methods
+ PBVAL ParseJson(PGLOBAL g, char* s, size_t n);
+ PSZ Serialize(PGLOBAL g, PBVAL bvp, char* fn, int pretty);
+
+protected:
+ OFFSET ParseArray(size_t& i);
+ OFFSET ParseObject(size_t& i);
+ PBVAL ParseValue(size_t& i, PBVAL bvp);
+ OFFSET ParseString(size_t& i);
+ void ParseNumeric(size_t& i, PBVAL bvp);
+ OFFSET ParseAsArray(size_t& i);
+ bool SerializeArray(OFFSET arp, bool b);
+ bool SerializeObject(OFFSET obp);
+ bool SerializeValue(PBVAL vp, bool b = false);
+
+ // Members used when parsing and serializing
+ JOUT* jp; // Used with serialize
+ char* s; // The Json string to parse
+ size_t len; // The Json string length
+ int pretty; // The pretty style of the file to parse
+ bool pty[3]; // Used to guess what pretty is
+ bool comma; // True if Pretty = 1
+
+ // Default constructor not to be used
+ BDOC(void) {}
+}; // end of class BDOC
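+
+/***********************************************************************/
+/* Illustrative sketch only (left disabled): one way a caller might    */
+/* use the interface above, assuming a valid PGLOBAL g with a large    */
+/* enough work area. The helper name and the sample document are not   */
+/* part of the interface; they are shown only as a usage hint.         */
+/***********************************************************************/
+#if 0
+static void BdocUsageSketch(PGLOBAL g)
+{
+  char  src[] = "{\"name\":\"foo\",\"vals\":[1,2,3]}";
+  BDOC  doc(g);
+  PBVAL top = doc.ParseJson(g, src, sizeof(src) - 1);
+
+  if (top) {
+    PBVAL vals = doc.GetKeyValue(top, (PSZ)"vals");          // the array
+    int   one = doc.GetInteger(doc.GetArrayValue(vals, 0));  // --> 1
+    PBVAL four = doc.NewVal(TYPE_INTG);
+
+    doc.SetInteger(four, 4);
+    doc.AddArrayValue(vals, four);          // append 4 to the array
+    doc.Serialize(g, top, NULL, 0);         // back to a json string
+  } // endif top
+} // end of BdocUsageSketch
+#endif // 0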
diff --git a/storage/connect/bsonudf.cpp b/storage/connect/bsonudf.cpp
new file mode 100644
index 00000000000..29fe0a6bf22
--- /dev/null
+++ b/storage/connect/bsonudf.cpp
@@ -0,0 +1,6245 @@
+/****************** bsonudf C++ Program Source Code File (.CPP) ******************/
+/* PROGRAM NAME: bsonudf Version 1.0 */
+/* (C) Copyright to the author Olivier BERTRAND 2020 - 2021 */
+/* This program implements the BSON User Defined Functions.                     */
+/*********************************************************************************/
+
+/*********************************************************************************/
+/* Include relevant sections of the MariaDB header file. */
+/*********************************************************************************/
+#include <my_global.h>
+#include <mysqld.h>
+#include <mysql.h>
+#include <sql_error.h>
+#include <stdio.h>
+
+#include "bsonudf.h"
+
+#if defined(UNIX) || defined(UNIV_LINUX)
+#define _O_RDONLY O_RDONLY
+#endif
+
+#define MEMFIX 4096
+#if defined(connect_EXPORTS)
+#define PUSH_WARNING(M) push_warning(current_thd, Sql_condition::WARN_LEVEL_WARN, 0, M)
+#else
+#define PUSH_WARNING(M) htrc(M)
+#endif
+#define M 6
+
+int JsonDefPrec = -1;
+int GetDefaultPrec(void);
+int IsArgJson(UDF_ARGS* args, uint i);
+void SetChanged(PBSON bsp);
+int GetJsonDefPrec(void);
+
+static PBSON BbinAlloc(PGLOBAL g, ulong len, PBVAL jsp);
+
+/* --------------------------------- JSON UDF ---------------------------------- */
+
+/*********************************************************************************/
+/* Replaces GetJsonGrpSize, which is not usable when CONNECT is not installed.  */
+/*********************************************************************************/
+int GetJsonDefPrec(void) {
+ return (JsonDefPrec < 0) ? GetDefaultPrec() : JsonDefPrec;
+} /* end of GetJsonDefPrec */
+
+/*********************************************************************************/
+/* Program for saving the status of the memory pools. */
+/*********************************************************************************/
+inline void JsonMemSave(PGLOBAL g) {
+ g->Saved_Size = ((PPOOLHEADER)g->Sarea)->To_Free;
+} /* end of JsonMemSave */
+
+/*********************************************************************************/
+/* Program for freeing the memory pools. */
+/*********************************************************************************/
+inline void JsonFreeMem(PGLOBAL g) {
+ g->Activityp = NULL;
+ g = PlugExit(g);
+} /* end of JsonFreeMem */
+
+/*********************************************************************************/
+/* Allocate and initialize a BSON structure. */
+/*********************************************************************************/
+static PBSON BbinAlloc(PGLOBAL g, ulong len, PBVAL jsp)
+{
+ PBSON bsp = (PBSON)PlgDBSubAlloc(g, NULL, sizeof(BSON));
+
+ if (bsp) {
+ strcpy(bsp->Msg, "Binary Json");
+ bsp->Msg[BMX] = 0;
+ bsp->Filename = NULL;
+ bsp->G = g;
+ bsp->Pretty = 2;
+ bsp->Reslen = len;
+ bsp->Changed = false;
+ bsp->Top = bsp->Jsp = (PJSON)jsp;
+ bsp->Bsp = NULL;
+ } else
+ PUSH_WARNING(g->Message);
+
+ return bsp;
+} /* end of BbinAlloc */
+
+/* --------------------------- New Testing BJSON Stuff --------------------------*/
+
+/*********************************************************************************/
+/* SubAlloc a new BJNX class with protection against memory exhaustion. */
+/*********************************************************************************/
+static PBJNX BjnxNew(PGLOBAL g, PBVAL vlp, int type, int len)
+{
+ PBJNX bjnx;
+
+ try {
+ bjnx = new(g) BJNX(g, vlp, type, len);
+ } catch (...) {
+ if (trace(1023))
+ htrc("%s\n", g->Message);
+
+ PUSH_WARNING(g->Message);
+ bjnx = NULL;
+ } // end try/catch
+
+ return bjnx;
+} /* end of BjnxNew */
+
+/* ----------------------------------- BJNX ------------------------------------ */
+
+/*********************************************************************************/
+/* BJNX public constructor.                                                      */
+/*********************************************************************************/
+BJNX::BJNX(PGLOBAL g) : BDOC(g)
+{
+ Row = NULL;
+ Bvalp = NULL;
+ Jpnp = NULL;
+ Jp = NULL;
+ Nodes = NULL;
+ Value = NULL;
+ MulVal = NULL;
+ Jpath = NULL;
+ Buf_Type = TYPE_STRING;
+ Long = len;
+ Prec = 0;
+ Nod = 0;
+ Xnod = -1;
+ K = 0;
+ I = -1;
+ Imax = 9;
+ B = 0;
+ Xpd = false;
+ Parsed = false;
+ Found = false;
+ Wr = false;
+ Jb = false;
+ Changed = false;
+ Throw = false;
+} // end of BJNX constructor
+
+/*********************************************************************************/
+/* BJNX public constructor.                                                      */
+/*********************************************************************************/
+BJNX::BJNX(PGLOBAL g, PBVAL row, int type, int len, int prec, my_bool wr) : BDOC(g)
+{
+ Row = row;
+ Bvalp = NULL;
+ Jpnp = NULL;
+ Jp = NULL;
+ Nodes = NULL;
+ Value = AllocateValue(g, type, len, prec);
+ MulVal = NULL;
+ Jpath = NULL;
+ Buf_Type = type;
+ Long = len;
+ Prec = prec;
+ Nod = 0;
+ Xnod = -1;
+ K = 0;
+ I = -1;
+ Imax = 9;
+ B = 0;
+ Xpd = false;
+ Parsed = false;
+ Found = false;
+ Wr = wr;
+ Jb = false;
+ Changed = false;
+ Throw = false;
+} // end of BJNX constructor
+
+/*********************************************************************************/
+/* SetJpath: set and parse the json path. */
+/*********************************************************************************/
+my_bool BJNX::SetJpath(PGLOBAL g, char* path, my_bool jb)
+{
+ // Check Value was allocated
+ if (Value)
+ Value->SetNullable(true);
+
+ Jpath = path;
+
+ // Parse the json path
+ Parsed = false;
+ Nod = 0;
+ Jb = jb;
+ return ParseJpath(g);
+} // end of SetJpath
+
+/*********************************************************************************/
+/* Analyse array processing options. */
+/*********************************************************************************/
+my_bool BJNX::SetArrayOptions(PGLOBAL g, char* p, int i, PSZ nm)
+{
+ int n = (int)strlen(p);
+ my_bool dg = true, b = false;
+ PJNODE jnp = &Nodes[i];
+
+ if (*p) {
+ if (p[n - 1] == ']') {
+ p[--n] = 0;
+ } else if (!IsNum(p)) {
+ // Wrong array specification
+ sprintf(g->Message, "Invalid array specification %s", p);
+ return true;
+ } // endif p
+
+ } else
+ b = true;
+
+ // To check whether a numeric Rank was specified
+ dg = IsNum(p);
+
+ if (!n) {
+ // Default specifications
+ if (jnp->Op != OP_EXP) {
+ if (Wr) {
+ // Force append
+ jnp->Rank = INT_MAX32;
+ jnp->Op = OP_LE;
+ } else if (Jb) {
+ // Return a Json item
+ jnp->Op = OP_XX;
+ } else if (b) {
+ // Return 1st value (B is the index base)
+ jnp->Rank = B;
+ jnp->Op = OP_LE;
+ } else if (!Value->IsTypeNum()) {
+ jnp->CncVal = AllocateValue(g, PlugDup(g, ", "), TYPE_STRING);
+ jnp->Op = OP_CNC;
+ } else
+ jnp->Op = OP_ADD;
+
+ } // endif OP
+
+ } else if (dg) {
+ // Return nth value
+ jnp->Rank = atoi(p) - B;
+ jnp->Op = OP_EQ;
+ } else if (Wr) {
+ sprintf(g->Message, "Invalid specification %s in a write path", p);
+ return true;
+ } else if (n == 1) {
+ // Set the Op value;
+ switch (*p) {
+ case '+': jnp->Op = OP_ADD; break;
+ case 'x': jnp->Op = OP_MULT; break;
+ case '>': jnp->Op = OP_MAX; break;
+ case '<': jnp->Op = OP_MIN; break;
+ case '!': jnp->Op = OP_SEP; break; // Average
+ case '#': jnp->Op = OP_NUM; break;
+ case '*': jnp->Op = OP_EXP; break;
+ default:
+ sprintf(g->Message, "Invalid function specification %c", *p);
+ return true;
+ } // endswitch *p
+
+ } else if (*p == '"' && p[n - 1] == '"') {
+ // This is a concat specification
+ jnp->Op = OP_CNC;
+
+ if (n > 2) {
+ // Set concat intermediate string
+ p[n - 1] = 0;
+
+ if (trace(1))
+ htrc("Concat string=%s\n", p + 1);
+
+ jnp->CncVal = AllocateValue(g, p + 1, TYPE_STRING);
+ } // endif n
+
+ } else {
+ strcpy(g->Message, "Wrong array specification");
+ return true;
+ } // endif's
+
+#if 0
+ // For calculated arrays, a local Value must be used
+ switch (jnp->Op) {
+ case OP_NUM:
+ jnp->Valp = AllocateValue(g, TYPE_INT);
+ break;
+ case OP_ADD:
+ case OP_MULT:
+ case OP_SEP:
+ if (!IsTypeChar(Buf_Type))
+ jnp->Valp = AllocateValue(g, Buf_Type, 0, GetPrecision());
+ else
+ jnp->Valp = AllocateValue(g, TYPE_DOUBLE, 0, 2);
+
+ break;
+ case OP_MIN:
+ case OP_MAX:
+ jnp->Valp = AllocateValue(g, Buf_Type, Long, GetPrecision());
+ break;
+ case OP_CNC:
+ if (IsTypeChar(Buf_Type))
+ jnp->Valp = AllocateValue(g, TYPE_STRING, Long, GetPrecision());
+ else
+ jnp->Valp = AllocateValue(g, TYPE_STRING, 512);
+
+ break;
+ default:
+ break;
+ } // endswitch Op
+
+ if (jnp->Valp)
+ MulVal = AllocateValue(g, jnp->Valp);
+#endif // 0
+
+ return false;
+} // end of SetArrayOptions
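+
+/*********************************************************************************/
+/* Illustration only (assuming the default index base B = 0): the array          */
+/* specifications handled above behave roughly as follows:                       */
+/*   [n]      Op = OP_EQ,  Rank = n   take the value of rank n                   */
+/*   []       Op = OP_LE              take the first value (when reading)        */
+/*   [+]      Op = OP_ADD             sum of the values                          */
+/*   [x]      Op = OP_MULT            product of the values                      */
+/*   [>] [<]  Op = OP_MAX / OP_MIN    maximum / minimum value                    */
+/*   [!]      Op = OP_SEP             average of the values                      */
+/*   [#]      Op = OP_NUM             number of values                           */
+/*   [*]      Op = OP_EXP             expand (when supported)                    */
+/*   ["-"]    Op = OP_CNC             concatenation with "-" as separator        */
+/*********************************************************************************/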
+
+/*********************************************************************************/
+/* Parse the eventual passed Jpath information. */
+/* This information can be specified in the Fieldfmt column option when */
+/* creating the table. It permits to indicate the position of the node */
+/* corresponding to that column. */
+/*********************************************************************************/
+my_bool BJNX::ParseJpath(PGLOBAL g)
+{
+ char* p, * p1 = NULL, * p2 = NULL, * pbuf = NULL;
+ int i;
+ my_bool a, mul = false;
+
+ if (Parsed)
+ return false; // Already done
+ else if (!Jpath)
+ // Jpath = Name;
+ return true;
+
+ if (trace(1))
+ htrc("ParseJpath %s\n", SVP(Jpath));
+
+ if (!(pbuf = PlgDBDup(g, Jpath)))
+ return true;
+
+ if (*pbuf == '$') pbuf++;
+ if (*pbuf == '.') pbuf++;
+ if (*pbuf == '[') p1 = pbuf++;
+
+ // Estimate the required number of nodes
+ for (i = 0, p = pbuf; (p = NextChr(p, '.')); i++, p++)
+ Nod++; // One path node found
+
+ if (!(Nodes = (PJNODE)PlgDBSubAlloc(g, NULL, (++Nod) * sizeof(JNODE))))
+ return true;
+
+ memset(Nodes, 0, (Nod) * sizeof(JNODE));
+
+ // Analyze the Jpath for this column
+ for (i = 0, p = pbuf; p && i < Nod; i++, p = (p2 ? p2 : NULL)) {
+ a = (p1 != NULL);
+ p1 = strchr(p, '[');
+ p2 = strchr(p, '.');
+
+ if (!p2)
+ p2 = p1;
+ else if (p1) {
+ if (p1 < p2)
+ p2 = p1;
+ else if (p1 == p2 + 1)
+ *p2++ = 0; // Old syntax .[
+ else
+ p1 = NULL;
+
+ } // endif p1
+
+ if (p2)
+ *p2++ = 0;
+
+ // Jpath must be explicit
+ if (a || *p == 0 || *p == '[' || IsNum(p)) {
+ // Analyse intermediate array processing
+ if (SetArrayOptions(g, p, i, Nodes[i - 1].Key))
+ return true;
+
+ } else if (*p == '*') {
+ if (Wr) {
+ sprintf(g->Message, "Invalid specification %c in a write path", *p);
+ return true;
+ } else // Return JSON
+ Nodes[i].Op = OP_XX;
+
+ } else {
+ Nodes[i].Key = p;
+ Nodes[i].Op = OP_EXIST;
+ } // endif's
+
+ } // endfor i, p
+
+ Nod = i;
+//MulVal = AllocateValue(g, Value);
+
+ if (trace(1))
+ for (i = 0; i < Nod; i++)
+ htrc("Node(%d) Key=%s Op=%d Rank=%d\n",
+ i, SVP(Nodes[i].Key), Nodes[i].Op, Nodes[i].Rank);
+
+ Parsed = true;
+ return false;
+} // end of ParseJpath
+
+/*********************************************************************************/
+/* Make a valid key from the passed argument. */
+/*********************************************************************************/
+PSZ BJNX::MakeKey(UDF_ARGS *args, int i)
+{
+ if (args->arg_count > (unsigned)i) {
+ int j = 0, n = args->attribute_lengths[i];
+ my_bool b; // true if attribute is zero terminated
+ PSZ p;
+ PCSZ s = args->attributes[i];
+
+ if (s && *s && (n || *s == '\'')) {
+ if ((b = (!n || !s[n])))
+ n = strlen(s);
+
+ if (IsArgJson(args, i))
+ j = (int)(strchr(s, '_') - s + 1);
+
+ if (j && n > j) {
+ s += j;
+ n -= j;
+ } else if (*s == '\'' && s[n-1] == '\'') {
+ s++;
+ n -= 2;
+ b = false;
+ } // endif *s
+
+ if (n < 1)
+ return NewStr((PSZ)"Key");
+
+ if (!b) {
+ p = (PSZ)BsonSubAlloc(n + 1);
+ memcpy(p, s, n);
+ p[n] = 0;
+ return p;
+ } // endif b
+
+ } // endif s
+
+ return NewStr((PSZ)s);
+ } // endif count
+
+ return NewStr((PSZ)"Key");
+} // end of MakeKey
+
+/*********************************************************************************/
+/* MakeJson: Make the Json tree to serialize. */
+/*********************************************************************************/
+PBVAL BJNX::MakeJson(PGLOBAL g, PBVAL bvp, int n)
+{
+ PBVAL vlp, jvp = bvp;
+
+ if (n < Nod -1) {
+ if (bvp->Type == TYPE_JAR) {
+ int ars = GetArraySize(bvp);
+ PJNODE jnp = &Nodes[n];
+
+ jvp = NewVal(TYPE_JAR);
+ jnp->Op = OP_EQ;
+
+ for (int i = 0; i < ars; i++) {
+ jnp->Rank = i;
+ vlp = GetRowValue(g, bvp, n);
+ AddArrayValue(jvp, DupVal(vlp));
+ } // endfor i
+
+ jnp->Op = OP_XX;
+ jnp->Rank = 0;
+ } else if(bvp->Type == TYPE_JOB) {
+ jvp = NewVal(TYPE_JOB);
+
+ for (PBPR prp = GetObject(bvp); prp; prp = GetNext(prp)) {
+ vlp = GetRowValue(g, GetVlp(prp), n + 1);
+ SetKeyValue(jvp, vlp, MZP(prp->Key));
+ } // endfor prp
+
+ } // endif Type
+
+ } // endif n
+
+ Jb = true;
+ return jvp;
+} // end of MakeJson
+
+/*********************************************************************************/
+/* SetJsonValue: Set a value from the contents of a BVALUE.                      */
+/*********************************************************************************/
+void BJNX::SetJsonValue(PGLOBAL g, PVAL vp, PBVAL vlp)
+{
+ if (vlp) {
+ vp->SetNull(false);
+
+ if (Jb) {
+ vp->SetValue_psz(Serialize(g, vlp, NULL, 0));
+ Jb = false;
+ } else switch (vlp->Type) {
+ case TYPE_DTM:
+ case TYPE_STRG:
+ vp->SetValue_psz(GetString(vlp));
+ break;
+ case TYPE_INTG:
+ vp->SetValue(GetInteger(vlp));
+ break;
+ case TYPE_BINT:
+ vp->SetValue(GetBigint(vlp));
+ break;
+ case TYPE_DBL:
+ case TYPE_FLOAT:
+ if (vp->IsTypeNum())
+ vp->SetValue(GetDouble(vlp));
+ else // Get the proper number of decimals
+ vp->SetValue_psz(GetString(vlp));
+
+ break;
+ case TYPE_BOOL:
+ if (vp->IsTypeNum())
+ vp->SetValue(GetInteger(vlp) ? 1 : 0);
+ else
+ vp->SetValue_psz(GetString(vlp));
+
+ break;
+ case TYPE_JAR:
+ vp->SetValue_psz(GetArrayText(g, vlp, NULL));
+ break;
+ case TYPE_JOB:
+ vp->SetValue_psz(GetObjectText(g, vlp, NULL));
+ break;
+ case TYPE_NULL:
+ vp->SetNull(true);
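+        /* fall through */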
+ default:
+ vp->Reset();
+ } // endswitch Type
+
+ } else {
+ vp->SetNull(true);
+ vp->Reset();
+ } // endif val
+
+} // end of SetJsonValue
+
+/*********************************************************************************/
+/* GetJson: Return the json item addressed by the parsed path.                   */
+/*********************************************************************************/
+PBVAL BJNX::GetJson(PGLOBAL g)
+{
+ return GetRowValue(g, Row, 0);
+} // end of GetJson
+
+/*********************************************************************************/
+/* ReadValue: Set the column Value from the item addressed by the parsed path.   */
+/*********************************************************************************/
+void BJNX::ReadValue(PGLOBAL g)
+{
+ Value->SetValue_pval(GetColumnValue(g, Row, 0));
+} // end of ReadValue
+
+/*********************************************************************************/
+/* GetColumnValue: Get the item at the parsed path as the typed column Value.    */
+/*********************************************************************************/
+PVAL BJNX::GetColumnValue(PGLOBAL g, PBVAL row, int i)
+{
+ PBVAL vlp = GetRowValue(g, row, i);
+
+ SetJsonValue(g, Value, vlp);
+ return Value;
+} // end of GetColumnValue
+
+/*********************************************************************************/
+/* GetRowValue: Follow the path nodes from row and return the addressed item.    */
+/*********************************************************************************/
+PBVAL BJNX::GetRowValue(PGLOBAL g, PBVAL row, int i)
+{
+ my_bool expd = false;
+ PBVAL bap;
+ PBVAL vlp = NULL;
+
+ for (; i < Nod && row; i++) {
+ if (Nodes[i].Op == OP_NUM) {
+ Value->SetValue(row->Type == TYPE_JAR ? GetArraySize(row) : 1);
+ vlp = NewVal(Value);
+ return vlp;
+ } else if (Nodes[i].Op == OP_XX) {
+ return MakeJson(g, row, i);
+ } else if (Nodes[i].Op == OP_EXP) {
+ PUSH_WARNING("Expand not supported by this function");
+ return NULL;
+ } else switch (row->Type) {
+ case TYPE_JOB:
+ if (!Nodes[i].Key) {
+ // Expected Array was not there
+ if (Nodes[i].Op == OP_LE) {
+ if (i < Nod - 1)
+ continue;
+ else
+ vlp = row; // DupVal(g, row) ???
+
+ } else {
+ strcpy(g->Message, "Unexpected object");
+ vlp = NULL;
+ } //endif Op
+
+ } else
+ vlp = GetKeyValue(row, Nodes[i].Key);
+
+ break;
+ case TYPE_JAR:
+ bap = row;
+
+ if (!Nodes[i].Key) {
+ if (Nodes[i].Op == OP_EQ || Nodes[i].Op == OP_LE)
+ vlp = GetArrayValue(bap, Nodes[i].Rank);
+ else if (Nodes[i].Op == OP_EXP)
+ return (PBVAL)ExpandArray(g, bap, i);
+ else
+ return NewVal(CalculateArray(g, bap, i));
+
+ } else {
+ // Unexpected array, unwrap it as [0]
+ vlp = GetArrayValue(bap, 0);
+ i--;
+ } // endif's
+
+ break;
+ case TYPE_JVAL:
+ vlp = row;
+ break;
+ default:
+ sprintf(g->Message, "Invalid row JSON type %d", row->Type);
+ vlp = NULL;
+ } // endswitch Type
+
+ row = vlp;
+ } // endfor i
+
+ return vlp;
+} // end of GetRowValue
+
+/*********************************************************************************/
+/* ExpandArray: */
+/*********************************************************************************/
+PVAL BJNX::ExpandArray(PGLOBAL g, PBVAL arp, int n)
+{
+ strcpy(g->Message, "Expand cannot be done by this function");
+ return NULL;
+} // end of ExpandArray
+
+/*********************************************************************************/
+/* Get the value used for calculating the array. */
+/*********************************************************************************/
+PVAL BJNX::GetCalcValue(PGLOBAL g, PBVAL bap, int n)
+{
+ // For calculated arrays, a local Value must be used
+ int lng = 0;
+ short type, prec = 0;
+ bool b = n < Nod - 1;
+ PVAL valp;
+ PBVAL vlp, vp;
+ OPVAL op = Nodes[n].Op;
+
+ switch (op) {
+ case OP_NUM:
+ type = TYPE_INT;
+ break;
+ case OP_ADD:
+ case OP_MULT:
+ if (!IsTypeNum(Buf_Type)) {
+ type = TYPE_INT;
+ prec = 0;
+
+ for (vlp = GetArray(bap); vlp; vlp = GetNext(vlp)) {
+ vp = (b && IsJson(vlp)) ? GetRowValue(g, vlp, n + 1) : vlp;
+
+ switch (vp->Type) {
+ case TYPE_BINT:
+ if (type == TYPE_INT)
+ type = TYPE_BIGINT;
+
+ break;
+ case TYPE_DBL:
+ case TYPE_FLOAT:
+ type = TYPE_DOUBLE;
+ prec = MY_MAX(prec, vp->Nd);
+ break;
+ default:
+ break;
+ } // endswitch Type
+
+ } // endfor vlp
+
+ } else {
+ type = Buf_Type;
+ prec = GetPrecision();
+ } // endif Buf_Type
+
+ break;
+ case OP_SEP:
+ if (IsTypeChar(Buf_Type)) {
+ type = TYPE_DOUBLE;
+ prec = 2;
+ } else {
+ type = Buf_Type;
+ prec = GetPrecision();
+ } // endif Buf_Type
+
+ break;
+ case OP_MIN:
+ case OP_MAX:
+ type = Buf_Type;
+ lng = Long;
+ prec = GetPrecision();
+ break;
+ case OP_CNC:
+ type = TYPE_STRING;
+
+ if (IsTypeChar(Buf_Type)) {
+ lng = (Long) ? Long : 512;
+ prec = GetPrecision();
+ } else
+ lng = 512;
+
+ break;
+ default:
+ break;
+ } // endswitch Op
+
+ return valp = AllocateValue(g, type, lng, prec);
+} // end of GetCalcValue
+
+/*********************************************************************************/
+/* CalculateArray */
+/*********************************************************************************/
+PVAL BJNX::CalculateArray(PGLOBAL g, PBVAL bap, int n)
+{
+ int i, ars = GetArraySize(bap), nv = 0;
+ bool err;
+ OPVAL op = Nodes[n].Op;
+ PVAL val[2], vp = GetCalcValue(g, bap, n);
+ PVAL mulval = AllocateValue(g, vp);
+ PBVAL bvrp, bvp;
+ BVAL bval;
+
+ vp->Reset();
+ xtrc(1, "CalculateArray size=%d op=%d\n", ars, op);
+
+ try {
+ for (i = 0; i < ars; i++) {
+ bvrp = GetArrayValue(bap, i);
+ xtrc(1, "i=%d nv=%d\n", i, nv);
+
+ if (!IsValueNull(bvrp) || (op == OP_CNC && GetJsonNull())) {
+ if (IsValueNull(bvrp)) {
+ SetString(bvrp, NewStr(GetJsonNull()), 0);
+ bvp = bvrp;
+ } else if (n < Nod - 1 && IsJson(bvrp)) {
+ SetValue(&bval, GetColumnValue(g, bvrp, n + 1));
+ bvp = &bval;
+ } else
+ bvp = bvrp;
+
+ if (trace(1))
+ htrc("bvp=%s null=%d\n",
+ GetString(bvp), IsValueNull(bvp) ? 1 : 0);
+
+ if (!nv++) {
+ SetJsonValue(g, vp, bvp);
+ continue;
+ } else
+ SetJsonValue(g, mulval, bvp);
+
+ if (!mulval->IsNull()) {
+ switch (op) {
+ case OP_CNC:
+ if (Nodes[n].CncVal) {
+ val[0] = Nodes[n].CncVal;
+ err = vp->Compute(g, val, 1, op);
+ } // endif CncVal
+
+ val[0] = mulval;
+ err = vp->Compute(g, val, 1, op);
+ break;
+ // case OP_NUM:
+ case OP_SEP:
+ val[0] = vp;
+ val[1] = mulval;
+ err = vp->Compute(g, val, 2, OP_ADD);
+ break;
+ default:
+ val[0] = vp;
+ val[1] = mulval;
+ err = vp->Compute(g, val, 2, op);
+ } // endswitch Op
+
+ if (err)
+ vp->Reset();
+
+ if (trace(1)) {
+            char buf[32];
+
+            htrc("vp='%s' err=%d\n",
+              vp->GetCharString(buf), err ? 1 : 0);
+ } // endif trace
+
+ } // endif Zero
+
+ } // endif jvrp
+
+ } // endfor i
+
+ if (op == OP_SEP) {
+ // Calculate average
+ mulval->SetValue(nv);
+ val[0] = vp;
+ val[1] = mulval;
+
+ if (vp->Compute(g, val, 2, OP_DIV))
+ vp->Reset();
+
+ } // endif Op
+
+ } catch (int n) {
+ xtrc(1, "Exception %d: %s\n", n, g->Message);
+ PUSH_WARNING(g->Message);
+ } catch (const char* msg) {
+ strcpy(g->Message, msg);
+ } // end catch
+
+ return vp;
+} // end of CalculateArray
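+
+/*********************************************************************************/
+/* Illustration only: for an array such as [10, 20, 60] the operators handled    */
+/* above should give roughly:                                                    */
+/*   OP_ADD -> 90    OP_MULT -> 12000    OP_MIN -> 10    OP_MAX -> 60            */
+/*   OP_SEP -> 30    (the sum divided by the number of non-null values)          */
+/*   OP_CNC -> "10-20-60" when "-" was given as the concat separator             */
+/*********************************************************************************/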
+
+/***********************************************************************/
+/* GetRow: Get the row where the value must be set, making missing items.   */
+/***********************************************************************/
+PBVAL BJNX::GetRow(PGLOBAL g)
+{
+ PBVAL val = NULL;
+ PBVAL arp;
+ PBVAL nwr, row = Row;
+
+ for (int i = 0; i < Nod - 1 && row; i++) {
+ if (Nodes[i].Op == OP_XX)
+ break;
+ else if (Nodes[i].Op == OP_EXP) {
+ PUSH_WARNING("Expand not supported by this function");
+ return NULL;
+ } else switch (row->Type) {
+ case TYPE_JOB:
+ if (!Nodes[i].Key)
+ // Expected Array was not there, wrap the value
+ continue;
+
+ val = GetKeyValue(row, Nodes[i].Key);
+ break;
+ case TYPE_JAR:
+ arp = row;
+
+ if (!Nodes[i].Key) {
+ if (Nodes[i].Op == OP_EQ)
+ val = GetArrayValue(arp, Nodes[i].Rank);
+ else
+ val = GetArrayValue(arp, Nodes[i].Rx);
+
+ } else {
+ // Unexpected array, unwrap it as [0]
+ val = GetArrayValue(arp, 0);
+ i--;
+ } // endif Nodes
+
+ break;
+ case TYPE_JVAL:
+ val = MVP(row->To_Val);
+ break;
+ default:
+ sprintf(g->Message, "Invalid row JSON type %d", row->Type);
+ val = NULL;
+ } // endswitch Type
+
+ if (val) {
+ row = val;
+ } else {
+ // Construct missing objects
+ for (i++; row && i < Nod; i++) {
+ if (Nodes[i].Op == OP_XX)
+ break;
+
+ // Construct new row
+ nwr = NewVal();
+
+ if (row->Type == TYPE_JOB) {
+ SetKeyValue(row, MOF(nwr), Nodes[i - 1].Key);
+ } else if (row->Type == TYPE_JAR) {
+ AddArrayValue(row, MOF(nwr));
+ } else {
+ strcpy(g->Message, "Wrong type when writing new row");
+ nwr = NULL;
+ } // endif's
+
+ row = nwr;
+ } // endfor i
+
+ break;
+ } // endelse
+
+ } // endfor i
+
+ return row;
+} // end of GetRow
+
+/***********************************************************************/
+/* WriteValue: */
+/***********************************************************************/
+my_bool BJNX::WriteValue(PGLOBAL g, PBVAL jvalp)
+{
+ PBVAL objp = NULL;
+ PBVAL arp = NULL;
+ PBVAL jvp = NULL;
+ PBVAL row = GetRow(g);
+
+ if (!row)
+ return true;
+
+ switch (row->Type) {
+ case TYPE_JOB: objp = row; break;
+ case TYPE_JAR: arp = row; break;
+ case TYPE_JVAL: jvp = MVP(row->To_Val); break;
+ default:
+ strcpy(g->Message, "Invalid target type");
+ return true;
+ } // endswitch Type
+
+ if (arp) {
+ if (!Nodes[Nod - 1].Key) {
+ if (Nodes[Nod - 1].Op == OP_EQ)
+ SetArrayValue(arp, jvalp, Nodes[Nod - 1].Rank);
+ else
+ AddArrayValue(arp, MOF(jvalp));
+
+ } // endif Key
+
+ } else if (objp) {
+ if (Nodes[Nod - 1].Key)
+ SetKeyValue(objp, MOF(jvalp), Nodes[Nod - 1].Key);
+
+ } else if (jvp)
+ SetValueVal(jvp, jvalp);
+
+ return false;
+} // end of WriteValue
+
+/*********************************************************************************/
+/* DeleteItem: Delete the item(s) addressed by the parsed path.                  */
+/*********************************************************************************/
+my_bool BJNX::DeleteItem(PGLOBAL g, PBVAL row)
+{
+ int n = -1;
+ my_bool b = false;
+ bool loop;
+ PBVAL vlp, pvp, rwp;
+
+ do {
+ loop = false;
+ vlp = NULL;
+ pvp = rwp = row;
+
+ for (int i = 0; i < Nod && rwp; i++) {
+ if (Nodes[i].Op == OP_XX)
+ break;
+ else switch (rwp->Type) {
+ case TYPE_JOB:
+ if (!Nodes[i].Key) {
+ vlp = NULL;
+ } else
+ vlp = GetKeyValue(rwp, Nodes[i].Key);
+
+ break;
+ case TYPE_JAR:
+ if (!Nodes[i].Key) {
+ if (Nodes[i].Op == OP_EXP) {
+ if (loop) {
+ PUSH_WARNING("Only one expand can be handled");
+ return b;
+ } // endif loop
+
+ n++;
+ } else
+ n = Nodes[i].Rank;
+
+ vlp = GetArrayValue(rwp, n);
+
+ if (GetNext(vlp) && Nodes[i].Op == OP_EXP)
+ loop = true;
+
+ } else
+ vlp = NULL;
+
+ break;
+ case TYPE_JVAL:
+ vlp = rwp;
+ break;
+ default:
+ vlp = NULL;
+ } // endswitch Type
+
+ pvp = rwp;
+ rwp = vlp;
+ vlp = NULL;
+ } // endfor i
+
+ if (rwp) {
+ if (Nodes[Nod - 1].Op == OP_XX) {
+ if (!IsJson(rwp))
+ rwp->Type = TYPE_NULL;
+
+ rwp->To_Val = 0;
+ } else switch (pvp->Type) {
+ case TYPE_JOB:
+ b = DeleteKey(pvp, Nodes[Nod - 1].Key);
+ break;
+ case TYPE_JAR:
+ if (Nodes[Nod - 1].Op == OP_EXP) {
+ pvp->To_Val = 0;
+ loop = false;
+ } else
+ b = DeleteValue(pvp, n);
+
+ break;
+ default:
+ break;
+ } // endswitch Type
+
+ } // endif rwp
+
+ } while (loop);
+
+ return b;
+} // end of DeleteItem
+
+/*********************************************************************************/
+/* CheckPath: Checks whether the path exists in the document. */
+/*********************************************************************************/
+my_bool BJNX::CheckPath(PGLOBAL g)
+{
+ PBVAL val = NULL;
+ PBVAL row = Row;
+
+ for (int i = 0; i < Nod && row; i++) {
+ val = NULL;
+
+ if (Nodes[i].Op == OP_NUM || Nodes[i].Op == OP_XX) {
+ } else switch (row->Type) {
+ case TYPE_JOB:
+ if (Nodes[i].Key)
+ val = GetKeyValue(row, Nodes[i].Key);
+
+ break;
+ case TYPE_JAR:
+ if (!Nodes[i].Key)
+ if (Nodes[i].Op == OP_EQ || Nodes[i].Op == OP_LE)
+ val = GetArrayValue(row, Nodes[i].Rank);
+
+ break;
+ case TYPE_JVAL:
+ val = row;
+ break;
+ default:
+ sprintf(g->Message, "Invalid row JSON type %d", row->Type);
+ } // endswitch Type
+
+ if (i < Nod-1)
+ if (!(row = (IsJson(val)) ? val : NULL))
+ val = NULL;
+
+ } // endfor i
+
+ return (val != NULL);
+} // end of CheckPath
+
+/*********************************************************************************/
+/* Check if a path was specified and set jvp according to it. */
+/*********************************************************************************/
+my_bool BJNX::CheckPath(PGLOBAL g, UDF_ARGS *args, PBVAL jsp, PBVAL& jvp, int n)
+{
+ for (uint i = n; i < args->arg_count; i++)
+ if (args->arg_type[i] == STRING_RESULT && args->args[i]) {
+ // A path to a subset of the json tree is given
+ char *path = MakePSZ(g, args, i);
+
+ if (path) {
+ Row = jsp;
+
+ if (SetJpath(g, path))
+ return true;
+
+ if (!(jvp = GetJson(g))) {
+ sprintf(g->Message, "No sub-item at '%s'", path);
+ return true;
+ } else
+ return false;
+
+ } else {
+ strcpy(g->Message, "Path argument is null");
+ return true;
+ } // endif path
+
+ } // endif type
+
+ jvp = jsp;
+ return false;
+} // end of CheckPath
+
+/*********************************************************************************/
+/* Locate a value in a JSON tree: */
+/*********************************************************************************/
+PSZ BJNX::Locate(PGLOBAL g, PBVAL jsp, PBVAL jvp, int k)
+{
+ PSZ str = NULL;
+ my_bool b = false, err = true;
+
+ g->Message[0] = 0;
+
+ if (!jsp) {
+ strcpy(g->Message, "Null json tree");
+ return NULL;
+ } // endif jsp
+
+ try {
+ // Write to the path string
+ Jp = new(g) JOUTSTR(g);
+ Jp->WriteChr('$');
+ Bvalp = jvp;
+ K = k;
+
+ switch (jsp->Type) {
+ case TYPE_JAR:
+ err = LocateArray(g, jsp);
+ break;
+ case TYPE_JOB:
+ err = LocateObject(g, jsp);
+ break;
+ case TYPE_JVAL:
+ err = LocateValue(g, MVP(jsp->To_Val));
+ break;
+ default:
+ err = true;
+ } // endswitch Type
+
+ if (err) {
+ if (!g->Message[0])
+ strcpy(g->Message, "Invalid json tree");
+
+ } else if (Found) {
+ Jp->WriteChr('\0');
+ PlugSubAlloc(g, NULL, Jp->N);
+ str = Jp->Strp;
+ } // endif's
+
+ } catch (int n) {
+ xtrc(1, "Exception %d: %s\n", n, g->Message);
+ PUSH_WARNING(g->Message);
+ } catch (const char* msg) {
+ strcpy(g->Message, msg);
+ } // end catch
+
+ return str;
+} // end of Locate
+
+/*********************************************************************************/
+/* Locate in a JSON Array. */
+/*********************************************************************************/
+my_bool BJNX::LocateArray(PGLOBAL g, PBVAL jarp)
+{
+ char s[16];
+ int n = GetArraySize(jarp);
+ size_t m = Jp->N;
+
+ for (int i = 0; i < n && !Found; i++) {
+ Jp->N = m;
+ sprintf(s, "[%d]", i + B);
+
+ if (Jp->WriteStr(s))
+ return true;
+
+ if (LocateValue(g, GetArrayValue(jarp, i)))
+ return true;
+
+ } // endfor i
+
+ return false;
+} // end of LocateArray
+
+/*********************************************************************************/
+/* Locate in a JSON Object. */
+/*********************************************************************************/
+my_bool BJNX::LocateObject(PGLOBAL g, PBVAL jobp)
+{
+ size_t m;
+
+ if (Jp->WriteChr('.'))
+ return true;
+
+ m = Jp->N;
+
+ for (PBPR pair = GetObject(jobp); pair && !Found; pair = GetNext(pair)) {
+ Jp->N = m;
+
+ if (Jp->WriteStr(MZP(pair->Key)))
+ return true;
+
+ if (LocateValue(g, GetVlp(pair)))
+ return true;
+
+ } // endfor i
+
+ return false;
+} // end of LocateObject
+
+/*********************************************************************************/
+/* Locate a JSON Value. */
+/*********************************************************************************/
+my_bool BJNX::LocateValue(PGLOBAL g, PBVAL jvp)
+{
+ if (CompareTree(g, Bvalp, jvp))
+ Found = (--K == 0);
+ else if (jvp->Type == TYPE_JAR)
+ return LocateArray(g, jvp);
+ else if (jvp->Type == TYPE_JOB)
+ return LocateObject(g, jvp);
+
+ return false;
+} // end of LocateValue
+
+/*********************************************************************************/
+/* Locate all occurrences of a value in a JSON tree: */
+/*********************************************************************************/
+PSZ BJNX::LocateAll(PGLOBAL g, PBVAL jsp, PBVAL bvp, int mx)
+{
+ PSZ str = NULL;
+ my_bool b = false, err = true;
+ PJPN jnp;
+
+ if (!jsp) {
+ strcpy(g->Message, "Null json tree");
+ return NULL;
+ } // endif jsp
+
+ try {
+ jnp = (PJPN)PlugSubAlloc(g, NULL, sizeof(JPN) * mx);
+ memset(jnp, 0, sizeof(JPN) * mx);
+ g->Message[0] = 0;
+
+ // Write to the path string
+ Jp = new(g)JOUTSTR(g);
+ Bvalp = bvp;
+ Imax = mx - 1;
+ Jpnp = jnp;
+ Jp->WriteChr('[');
+
+ switch (jsp->Type) {
+ case TYPE_JAR:
+ err = LocateArrayAll(g, jsp);
+ break;
+ case TYPE_JOB:
+ err = LocateObjectAll(g, jsp);
+ break;
+ case TYPE_JVAL:
+ err = LocateValueAll(g, MVP(jsp->To_Val));
+ break;
+ default:
+ err = LocateValueAll(g, jsp);
+ } // endswitch Type
+
+ if (!err) {
+ if (Jp->N > 1)
+ Jp->N--;
+
+ Jp->WriteChr(']');
+ Jp->WriteChr('\0');
+ PlugSubAlloc(g, NULL, Jp->N);
+ str = Jp->Strp;
+ } else if (!g->Message[0])
+ strcpy(g->Message, "Invalid json tree");
+
+ } catch (int n) {
+ xtrc(1, "Exception %d: %s\n", n, g->Message);
+ PUSH_WARNING(g->Message);
+ } catch (const char* msg) {
+ strcpy(g->Message, msg);
+ } // end catch
+
+ return str;
+} // end of LocateAll
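+
+/*********************************************************************************/
+/* Illustration only (assumed data): in a document such as                       */
+/*   {"name": "Joe", "sizes": [39, 40, 41]}                                      */
+/* and with the default index base B = 0, locating the value 41 should return    */
+/*   "$.sizes[2]"     from Locate, and                                           */
+/*   ["$.sizes[2]"]   from LocateAll (a json array of all matching paths).       */
+/*********************************************************************************/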
+
+/*********************************************************************************/
+/* Locate in a JSON Array. */
+/*********************************************************************************/
+my_bool BJNX::LocateArrayAll(PGLOBAL g, PBVAL jarp)
+{
+ int i = 0;
+
+ if (I < Imax) {
+ Jpnp[++I].Type = TYPE_JAR;
+
+ for (PBVAL vp = GetArray(jarp); vp; vp = GetNext(vp)) {
+ Jpnp[I].N = i;
+
+ if (LocateValueAll(g, GetArrayValue(jarp, i)))
+ return true;
+
+ i++;
+ } // endfor i
+
+ I--;
+ } // endif I
+
+ return false;
+} // end of LocateArrayAll
+
+/*********************************************************************************/
+/* Locate in a JSON Object. */
+/*********************************************************************************/
+my_bool BJNX::LocateObjectAll(PGLOBAL g, PBVAL jobp)
+{
+ if (I < Imax) {
+ Jpnp[++I].Type = TYPE_JOB;
+
+ for (PBPR pair = GetObject(jobp); pair; pair = GetNext(pair)) {
+ Jpnp[I].Key = MZP(pair->Key);
+
+ if (LocateValueAll(g, GetVlp(pair)))
+ return true;
+
+ } // endfor i
+
+ I--;
+ } // endif I
+
+ return false;
+} // end of LocateObjectAll
+
+/*********************************************************************************/
+/* Locate a JSON Value. */
+/*********************************************************************************/
+my_bool BJNX::LocateValueAll(PGLOBAL g, PBVAL jvp)
+{
+ if (CompareTree(g, Bvalp, jvp))
+ return AddPath();
+ else if (jvp->Type == TYPE_JAR)
+ return LocateArrayAll(g, jvp);
+ else if (jvp->Type == TYPE_JOB)
+ return LocateObjectAll(g, jvp);
+
+ return false;
+} // end of LocateValueAll
+
+/*********************************************************************************/
+/* Compare two JSON trees. */
+/*********************************************************************************/
+my_bool BJNX::CompareTree(PGLOBAL g, PBVAL jp1, PBVAL jp2)
+{
+ if (!jp1 || !jp2 || jp1->Type != jp2->Type || GetSize(jp1) != GetSize(jp2))
+ return false;
+
+ my_bool found = true;
+
+ if (jp1->Type == TYPE_JAR) {
+ for (int i = 0; found && i < GetArraySize(jp1); i++)
+ found = (CompareValues(g, GetArrayValue(jp1, i), GetArrayValue(jp2, i)));
+
+ } else if (jp1->Type == TYPE_JOB) {
+ PBPR p1 = GetObject(jp1), p2 = GetObject(jp2);
+
+ // Keys can be differently ordered
+ for (; found && p1 && p2; p1 = GetNext(p1))
+ found = CompareValues(g, GetVlp(p1), GetKeyValue(jp2, GetKey(p1)));
+
+ } else if (jp1->Type == TYPE_JVAL) {
+ found = CompareTree(g, MVP(jp1->To_Val), (MVP(jp2->To_Val)));
+ } else
+ found = CompareValues(g, jp1, jp2);
+
+ return found;
+} // end of CompareTree
+
+/*********************************************************************************/
+/* Compare two VAL values and return true if they are equal. */
+/*********************************************************************************/
+my_bool BJNX::CompareValues(PGLOBAL g, PBVAL v1, PBVAL v2)
+{
+ my_bool b = false;
+
+ if (v1 && v2)
+ switch (v1->Type) {
+ case TYPE_JAR:
+ case TYPE_JOB:
+ if (v2->Type == v1->Type)
+ b = CompareTree(g, v1, v2);
+
+ break;
+ case TYPE_STRG:
+ if (v2->Type == TYPE_STRG) {
+ if (v1->Nd || v2->Nd) // Case insensitive
+ b = (!stricmp(MZP(v1->To_Val), MZP(v2->To_Val)));
+ else
+ b = (!strcmp(MZP(v1->To_Val), MZP(v2->To_Val)));
+
+ } // endif Type
+
+ break;
+ case TYPE_DTM:
+ if (v2->Type == TYPE_DTM)
+ b = (!strcmp(MZP(v1->To_Val), MZP(v2->To_Val)));
+
+ break;
+ case TYPE_INTG:
+ if (v2->Type == TYPE_INTG)
+ b = (v1->N == v2->N);
+ else if (v2->Type == TYPE_BINT)
+ b = ((longlong)v1->N == LLN(v2->To_Val));
+
+ break;
+ case TYPE_BINT:
+ if (v2->Type == TYPE_INTG)
+ b = (LLN(v1->To_Val) == (longlong)v2->N);
+ else if (v2->Type == TYPE_BINT)
+ b = (LLN(v1->To_Val) == LLN(v2->To_Val));
+
+ break;
+ case TYPE_FLOAT:
+ if (v2->Type == TYPE_FLOAT)
+ b = (v1->F == v2->F);
+ else if (v2->Type == TYPE_DBL)
+ b = ((double)v1->F == DBL(v2->To_Val));
+
+ break;
+ case TYPE_DBL:
+ if (v2->Type == TYPE_DBL)
+ b = (DBL(v1->To_Val) == DBL(v2->To_Val));
+ else if (v2->Type == TYPE_FLOAT)
+ b = (DBL(v1->To_Val) == (double)v2->F);
+
+ break;
+ case TYPE_BOOL:
+ if (v2->Type == TYPE_BOOL)
+ b = (v1->B == v2->B);
+
+ break;
+ case TYPE_NULL:
+ b = (v2->Type == TYPE_NULL);
+ break;
+ default:
+ break;
+ } // endswitch Type
+
+ else
+ b = (!v1 && !v2);
+
+ return b;
+} // end of CompareValues
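+
+/*********************************************************************************/
+/* Illustration only: with the rules above, an INTG 45 and a BINT 45 compare     */
+/* equal, "Foo" and "foo" compare equal only when either string is marked case   */
+/* insensitive (Nd set), and a null only compares equal to another null.         */
+/*********************************************************************************/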
+
+/*********************************************************************************/
+/* Add the found path to the list. */
+/*********************************************************************************/
+my_bool BJNX::AddPath(void)
+{
+ char s[16];
+
+ if (Jp->WriteStr("\"$"))
+ return true;
+
+ for (int i = 0; i <= I; i++) {
+ if (Jpnp[i].Type == TYPE_JAR) {
+ sprintf(s, "[%d]", Jpnp[i].N + B);
+
+ if (Jp->WriteStr(s))
+ return true;
+
+ } else {
+ if (Jp->WriteChr('.'))
+ return true;
+
+ if (Jp->WriteStr(Jpnp[i].Key))
+ return true;
+
+ } // endif's
+
+ } // endfor i
+
+ if (Jp->WriteStr("\","))
+ return true;
+
+ return false;
+} // end of AddPath
+
+/*********************************************************************************/
+/* Make a JSON value from the passed argument. */
+/*********************************************************************************/
+PBVAL BJNX::MakeValue(UDF_ARGS *args, uint i, bool b, PBVAL *top)
+{
+ char *sap = (args->arg_count > i) ? args->args[i] : NULL;
+ int n, len;
+ int ci;
+ long long bigint;
+ PGLOBAL& g = G;
+ PBVAL jvp = NewVal();
+
+ if (top)
+ *top = NULL;
+
+ if (sap) switch (args->arg_type[i]) {
+ case STRING_RESULT:
+ if ((len = args->lengths[i])) {
+ if ((n = IsArgJson(args, i)) < 3)
+ sap = MakePSZ(g, args, i);
+
+ if (n) {
+ if (n == 3) {
+ PBSON bsp = (PBSON)sap;
+
+ if (i == 0) {
+ if (top)
+ *top = (PBVAL)bsp->Top;
+
+ jvp = (PBVAL)bsp->Jsp;
+ G = bsp->G;
+ Base = G->Sarea;
+ } else {
+ BJNX bnx(bsp->G);
+
+ jvp = MoveJson(&bnx, (PBVAL)bsp->Jsp);
+ } // endelse i
+
+ } else {
+ if (n == 2) {
+ if (!(sap = GetJsonFile(g, sap))) {
+ PUSH_WARNING(g->Message);
+ return jvp;
+ } // endif sap
+
+ len = strlen(sap);
+ } // endif n
+
+ if (!(jvp = ParseJson(g, sap, strlen(sap))))
+ PUSH_WARNING(g->Message);
+ else if (top)
+ *top = jvp;
+
+ } // endif's n
+
+ } else {
+ PBVAL bp = NULL;
+
+ if (b) {
+ if (strchr("[{ \t\r\n", *sap)) {
+ // Check whether this string is a valid json string
+ JsonMemSave(g);
+
+ if (!(bp = ParseJson(g, sap, strlen(sap))))
+ JsonSubSet(g); // Recover suballocated memory
+
+ g->Saved_Size = 0;
+ } else {
+ // Perhaps a file name
+ char* s = GetJsonFile(g, sap);
+
+ if (s)
+ bp = ParseJson(g, s, strlen(s));
+
+ } // endif's
+
+ } // endif b
+
+ if (!bp) {
+ ci = (strnicmp(args->attributes[i], "ci", 2)) ? 0 : 1;
+ SetString(jvp, sap, ci);
+ } else {
+ if (top)
+ *top = bp;
+
+ jvp = bp;
+ } // endif bp
+
+ } // endif n
+
+ } // endif len
+
+ break;
+ case INT_RESULT:
+ bigint = *(long long*)sap;
+
+ if ((bigint == 0LL && !strcmp(args->attributes[i], "FALSE")) ||
+ (bigint == 1LL && !strcmp(args->attributes[i], "TRUE")))
+ SetBool(jvp, (char)bigint);
+ else
+ SetBigint(jvp, bigint);
+
+ break;
+ case REAL_RESULT:
+ SetFloat(jvp, *(double*)sap);
+ break;
+ case DECIMAL_RESULT:
+ SetFloat(jvp, MakePSZ(g, args, i));
+ break;
+ case TIME_RESULT:
+ case ROW_RESULT:
+ default:
+ break;
+ } // endswitch arg_type
+
+ return jvp;
+} // end of MakeValue
+
+/*********************************************************************************/
+/* Try making a JSON value of the passed type from the passed argument. */
+/*********************************************************************************/
+PBVAL BJNX::MakeTypedValue(PGLOBAL g, UDF_ARGS *args, uint i, JTYP type, PBVAL *top)
+{
+ char *sap;
+ PBVAL jsp;
+ PBVAL jvp = MakeValue(args, i, false, top);
+
+ //if (type == TYPE_JSON) {
+ // if (jvp->GetValType() >= TYPE_JSON)
+ // return jvp;
+
+ //} else if (jvp->GetValType() == type)
+ // return jvp;
+
+ if (jvp->Type == TYPE_STRG) {
+ sap = GetString(jvp);
+
+ if ((jsp = ParseJson(g, sap, strlen(sap)))) {
+ if ((type == TYPE_JSON && jsp->Type != TYPE_JVAL) || jsp->Type == type) {
+ if (top)
+ *top = jvp;
+
+ SetValueVal(jvp, jsp);
+ } // endif Type
+
+ } // endif jsp
+
+ } // endif Type
+
+ return jvp;
+} // end of MakeTypedValue
+
+/*********************************************************************************/
+/* Parse a json file. */
+/*********************************************************************************/
+PBVAL BJNX::ParseJsonFile(PGLOBAL g, char *fn, int& pty, size_t& len)
+{
+ char *memory;
+ HANDLE hFile;
+ MEMMAP mm;
+ PBVAL jsp;
+
+ // Create the mapping file object
+ hFile = CreateFileMap(g, fn, &mm, MODE_READ, false);
+
+ if (hFile == INVALID_HANDLE_VALUE) {
+ DWORD rc = GetLastError();
+
+ if (!(*g->Message))
+ sprintf(g->Message, MSG(OPEN_MODE_ERROR), "map", (int)rc, fn);
+
+ return NULL;
+ } // endif hFile
+
+ // Get the file size
+ len = (size_t)mm.lenL;
+
+ if (mm.lenH)
+ len += mm.lenH;
+
+ memory = (char *)mm.memory;
+
+ if (!len) { // Empty or deleted file
+ CloseFileHandle(hFile);
+ return NULL;
+ } // endif len
+
+ if (!memory) {
+ CloseFileHandle(hFile);
+ sprintf(g->Message, MSG(MAP_VIEW_ERROR), fn, GetLastError());
+ return NULL;
+ } // endif Memory
+
+ CloseFileHandle(hFile); // Not used anymore
+
+ // Parse the json file and allocate its tree structure
+ g->Message[0] = 0;
+ jsp = ParseJson(g, memory, len);
+ pty = pretty;
+ CloseMemMap(memory, len);
+ return jsp;
+} // end of ParseJsonFile
+
+/*********************************************************************************/
+/* Make the result according to the first argument type. */
+/*********************************************************************************/
+char *BJNX::MakeResult(UDF_ARGS *args, PBVAL top, uint n)
+{
+ char *str = NULL;
+ PGLOBAL& g = G;
+
+ if (IsArgJson(args, 0) == 2) {
+ // Make the change in the json file
+ PSZ fn = MakePSZ(g, args, 0);
+
+ if (Changed) {
+ int pretty = 2;
+
+ for (uint i = n; i < args->arg_count; i++)
+ if (args->arg_type[i] == INT_RESULT) {
+ pretty = (int)*(longlong*)args->args[i];
+ break;
+ } // endif type
+
+ if (!Serialize(g, top, fn, pretty))
+ PUSH_WARNING(g->Message);
+
+ Changed = false;
+ } // endif Changed
+
+ str = fn;
+ } else if (IsArgJson(args, 0) == 3) {
+ PBSON bsp = (PBSON)args->args[0];
+
+ if (bsp->Filename) {
+ if (Changed) {
+ // Make the change in the json file
+ if (!Serialize(g, (PBVAL)top, bsp->Filename, bsp->Pretty))
+ PUSH_WARNING(g->Message);
+
+ Changed = false;
+ } // endif Changed
+
+ str = bsp->Filename;
+ } else if (!(str = Serialize(g, (PBVAL)top, NULL, 0)))
+ PUSH_WARNING(g->Message);
+
+ } else if (!(str = Serialize(g, top, NULL, 0)))
+ PUSH_WARNING(g->Message);
+
+ return str;
+} // end of MakeResult
+
+/*********************************************************************************/
+/* Make the binary result according to the first argument type. */
+/*********************************************************************************/
+PBSON BJNX::MakeBinResult(UDF_ARGS *args, PBVAL top, ulong len, int n)
+{
+ char* filename = NULL;
+ int pretty = 2;
+ PBSON bnp = NULL;
+
+ if (IsArgJson(args, 0) == 3) {
+ bnp = (PBSON)args->args[0];
+
+ if (bnp->Top != (PJSON)top)
+ bnp->Top = bnp->Jsp = (PJSON)top;
+
+ return bnp;
+ } // endif 3
+
+ if (IsArgJson(args, 0) == 2) {
+ for (uint i = n; i < args->arg_count; i++)
+ if (args->arg_type[i] == INT_RESULT) {
+ pretty = (int)*(longlong*)args->args[i];
+ break;
+ } // endif type
+
+ filename = (char*)args->args[0];
+ } // endif 2
+
+ if ((bnp = BbinAlloc(G, len, top))) {
+ bnp->Filename = filename;
+ bnp->Pretty = pretty;
+ strcpy(bnp->Msg, "Json Binary item");
+ } //endif bnp
+
+ return bnp;
+} // end of MakeBinResult
+
+/***********************************************************************/
+/* Move a Json val block from one area to the current area. */
+/***********************************************************************/
+PBVAL BJNX::MoveVal(PBVAL vlp)
+{
+ PBVAL nvp = NewVal(vlp->Type);
+
+ nvp->Nd = vlp->Nd;
+ return nvp;
+} // end of MoveVal
+
+/***********************************************************************/
+/* Move a Json tree from one area to current area. */
+/***********************************************************************/
+PBVAL BJNX::MoveJson(PBJNX bxp, PBVAL jvp)
+{
+ PBVAL res = NULL;
+
+ if (jvp)
+ switch (jvp->Type) {
+ case TYPE_JAR:
+ res = MoveArray(bxp, jvp);
+ break;
+ case TYPE_JOB:
+ res = MoveObject(bxp, jvp);
+ break;
+ default:
+ res = MoveValue(bxp, jvp);
+ break;
+ } // endswitch Type
+
+ return res;
+} // end of MoveJson
+
+/***********************************************************************/
+/* Move an array. */
+/***********************************************************************/
+PBVAL BJNX::MoveArray(PBJNX bxp, PBVAL jap)
+{
+ PBVAL vlp, vmp, jvp = NULL, jarp = MoveVal(jap);
+
+ for (vlp = bxp->GetArray(jap); vlp; vlp = bxp->GetNext(vlp)) {
+ vmp = MoveJson(bxp, vlp);
+
+ if (jvp)
+ jvp->Next = MOF(vmp);
+ else
+ jarp->To_Val = MOF(vmp);
+
+ jvp = vmp;
+ } // endfor vlp
+
+ return jarp;
+} // end of MoveArray
+
+/***********************************************************************/
+/* Move an object from one area to the current area.                  */
+/***********************************************************************/
+PBVAL BJNX::MoveObject(PBJNX bxp, PBVAL jop)
+{
+ PBPR mpp, prp, ppp = NULL;
+ PBVAL vmp, jobp = MoveVal(jop);
+
+ for (prp = bxp->GetObject(jop); prp; prp = bxp->GetNext(prp)) {
+ vmp = MoveJson(bxp, GetVlp(prp));
+ mpp = NewPair(DupStr(bxp->MZP(prp->Key)));
+ SetPairValue(mpp, vmp);
+
+ if (ppp)
+ ppp->Vlp.Next = MOF(mpp);
+ else
+ jobp->To_Val = MOF(mpp);
+
+ ppp = mpp;
+ } // endfor vlp
+
+ return jobp;
+} // end of MoveObject
+
+/***********************************************************************/
+/* Move a non json value. */
+/***********************************************************************/
+PBVAL BJNX::MoveValue(PBJNX bxp, PBVAL jvp)
+{
+ double *dp;
+ PBVAL nvp = MoveVal(jvp);
+
+ switch (jvp->Type) {
+ case TYPE_STRG:
+ case TYPE_DTM:
+ nvp->To_Val = DupStr(bxp->MZP(jvp->To_Val));
+ break;
+ case TYPE_DBL:
+ dp = (double*)BsonSubAlloc(sizeof(double));
+ *dp = bxp->DBL(jvp->To_Val);
+ nvp->To_Val = MOF(dp);
+ break;
+ case TYPE_JVAL:
+ nvp->To_Val = MOF(MoveJson(bxp, bxp->MVP(jvp->To_Val)));
+ break;
+ default:
+ nvp->To_Val = jvp->To_Val;
+ break;
+  } // endswitch Type
+
+ return nvp;
+} // end of MoveValue
+
+/* -----------------------------Utility functions ------------------------------ */
+
+/*********************************************************************************/
+/* Returns a pointer to the first integer argument found from the nth argument. */
+/*********************************************************************************/
+static int *GetIntArgPtr(PGLOBAL g, UDF_ARGS *args, uint& n)
+{
+ int *x = NULL;
+
+ for (uint i = n; i < args->arg_count; i++)
+ if (args->arg_type[i] == INT_RESULT) {
+ if (args->args[i]) {
+ if ((x = (int*)PlgDBSubAlloc(g, NULL, sizeof(int))))
+ *x = (int)*(longlong*)args->args[i];
+ else
+ PUSH_WARNING(g->Message);
+
+ } // endif args
+
+ n = i + 1;
+ break;
+ } // endif arg_type
+
+ return x;
+} // end of GetIntArgPtr
+
+/*********************************************************************************/
+/* Returns not 0 if the argument is a JSON item or file name. */
+/*********************************************************************************/
+int IsArgJson(UDF_ARGS *args, uint i)
+{
+ int n = 0;
+
+ if (i >= args->arg_count || args->arg_type[i] != STRING_RESULT) {
+ } else if (!strnicmp(args->attributes[i], "Bson_", 5) ||
+ !strnicmp(args->attributes[i], "Json_", 5)) {
+ if (!args->args[i] || strchr("[{ \t\r\n", *args->args[i]))
+      n = 1; // arg should be a json item
+// else
+// n = 2; // A file name may have been returned
+
+ } else if (!strnicmp(args->attributes[i], "Bbin_", 5)) {
+ if (args->lengths[i] == sizeof(BSON))
+ n = 3; // arg is a binary json item
+// else
+// n = 2; // A file name may have been returned
+
+ } else if (!strnicmp(args->attributes[i], "Bfile_", 6) ||
+ !strnicmp(args->attributes[i], "Jfile_", 6)) {
+ n = 2; // arg is a json file name
+#if 0
+ } else if (args->lengths[i]) {
+ PGLOBAL g = PlugInit(NULL, (size_t)args->lengths[i] * M + 1024);
+ char *sap = MakePSZ(g, args, i);
+
+ if (ParseJson(g, sap, strlen(sap)))
+ n = 4;
+
+ JsonFreeMem(g);
+#endif // 0
+ } // endif's
+
+ return n;
+} // end of IsArgJson
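+
+/*********************************************************************************/
+/* Illustration only: the return code depends on the prefix of the expression    */
+/* that produced the argument (args->attributes[i]):                             */
+/*   Bson_ / Json_   -> 1 when the value is null or looks like a json item       */
+/*   Bfile_ / Jfile_ -> 2 (the argument is a json file name)                     */
+/*   Bbin_           -> 3 when the length matches a binary BSON block            */
+/*   anything else   -> 0                                                        */
+/*********************************************************************************/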
+
+/*********************************************************************************/
+/* GetFileLength: returns file size in number of bytes. */
+/*********************************************************************************/
+static long GetFileLength(char *fn)
+{
+ int h;
+ long len;
+
+ h= open(fn, _O_RDONLY);
+
+ if (h != -1) {
+ if ((len = _filelength(h)) < 0)
+ len = 0;
+
+ close(h);
+ } else
+ len = 0;
+
+ return len;
+} // end of GetFileLength
+
+/* ------------------------- Now the new Bin UDF's ----------------------------- */
+
+/*********************************************************************************/
+/* Make a Json value containing the parameter. */
+/*********************************************************************************/
+my_bool bsonvalue_init(UDF_INIT* initid, UDF_ARGS* args, char* message)
+{
+ unsigned long reslen, memlen;
+
+ if (args->arg_count > 1) {
+ strcpy(message, "Cannot accept more than 1 argument");
+ return true;
+ } else
+ CalcLen(args, false, reslen, memlen);
+
+ return JsonInit(initid, args, message, false, reslen, memlen);
+} // end of bsonvalue_init
+
+char* bsonvalue(UDF_INIT* initid, UDF_ARGS* args, char* result,
+ unsigned long* res_length, char*, char*)
+{
+ char *str;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ if (!g->Xchk) {
+ if (!CheckMemory(g, initid, args, 1, false)) {
+ BJNX bnx(g);
+ PBVAL bvp = bnx.MakeValue(args, 0, true);
+
+ if (!(str = bnx.Serialize(g, bvp, NULL, 0)))
+ str = strcpy(result, g->Message);
+
+ } else
+ str = strcpy(result, g->Message);
+
+ // Keep result of constant function
+ g->Xchk = (initid->const_item) ? str : NULL;
+ } else
+ str = (char*)g->Xchk;
+
+ *res_length = strlen(str);
+ return str;
+} // end of bsonvalue
+
+void bsonvalue_deinit(UDF_INIT* initid) {
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bsonvalue_deinit
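+
+/*********************************************************************************/
+/* Usage note (assumed, not part of this code): once bsonvalue is registered as  */
+/* a UDF in the server, a call such as bsonvalue(56) should return the           */
+/* serialized scalar 56.                                                         */
+/*********************************************************************************/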
+
+/*********************************************************************************/
+/* Make a Json array containing all the parameters. */
+/* Note: jvp must be set before arp because it can be a binary argument. */
+/*********************************************************************************/
+my_bool bson_make_array_init(UDF_INIT* initid, UDF_ARGS* args, char* message)
+{
+ unsigned long reslen, memlen;
+
+ CalcLen(args, false, reslen, memlen);
+ return JsonInit(initid, args, message, false, reslen, memlen);
+} // end of bson_make_array_init
+
+char* bson_make_array(UDF_INIT* initid, UDF_ARGS* args, char* result,
+ unsigned long* res_length, char*, char*)
+{
+ char* str = NULL;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ if (!g->Xchk) {
+ if (!CheckMemory(g, initid, args, args->arg_count, false)) {
+ BJNX bnx(g);
+ PBVAL jvp = bnx.MakeValue(args, 0);
+ PBVAL arp = bnx.NewVal(TYPE_JAR);
+
+ for (uint i = 0; i < args->arg_count;) {
+ bnx.AddArrayValue(arp, jvp);
+ jvp = bnx.MakeValue(args, ++i);
+ } // endfor i
+
+ if (!(str = bnx.Serialize(g, arp, NULL, 0)))
+ str = strcpy(result, g->Message);
+
+ } else
+ str = strcpy(result, g->Message);
+
+ // Keep result of constant function
+ g->Xchk = (initid->const_item) ? str : NULL;
+ } else
+ str = (char*)g->Xchk;
+
+ *res_length = strlen(str);
+ return str;
+} // end of bson_make_array
+
+void bson_make_array_deinit(UDF_INIT* initid) {
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bson_make_array_deinit
+
+/*********************************************************************************/
+/* Add one or several values to a Bson array. */
+/*********************************************************************************/
+my_bool bson_array_add_values_init(UDF_INIT* initid, UDF_ARGS* args, char* message) {
+ unsigned long reslen, memlen;
+
+ if (args->arg_count < 2) {
+ strcpy(message, "This function must have at least 2 arguments");
+ return true;
+ //} else if (!IsArgJson(args, 0, true)) {
+ // strcpy(message, "First argument must be a valid json string or item");
+ // return true;
+ } else
+ CalcLen(args, false, reslen, memlen);
+
+ if (!JsonInit(initid, args, message, true, reslen, memlen)) {
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ // This is a constant function
+ g->N = (initid->const_item) ? 1 : 0;
+
+ // This is to avoid double execution when using prepared statements
+ if (IsArgJson(args, 0) > 1)
+ initid->const_item = 0;
+
+ return false;
+ } else
+ return true;
+
+} // end of bson_array_add_values_init
+
+char* bson_array_add_values(UDF_INIT* initid, UDF_ARGS* args, char* result,
+ unsigned long* res_length, char* is_null, char*) {
+ char* str = NULL;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ if (!g->Xchk) {
+ if (!CheckMemory(g, initid, args, args->arg_count, true)) {
+ BJNX bnx(g);
+ PBVAL arp = bnx.MakeValue(args, 0, true);
+
+ if (arp->Type != TYPE_JAR) {
+ PUSH_WARNING("First argument is not an array");
+ goto fin;
+ } // endif arp
+
+ for (uint i = 1; i < args->arg_count; i++)
+ bnx.AddArrayValue(arp, bnx.MakeValue(args, i));
+
+ bnx.SetChanged(true);
+ str = bnx.MakeResult(args, arp, INT_MAX);
+ } // endif CheckMemory
+
+ if (!str) {
+ PUSH_WARNING(g->Message);
+ str = args->args[0];
+ } // endif str
+
+ // Keep result of constant function
+ g->Xchk = (g->N) ? str : NULL;
+ } else
+ str = (char*)g->Xchk;
+
+ fin:
+ if (!str) {
+ *res_length = 0;
+ *is_null = 1;
+ } else
+ *res_length = strlen(str);
+
+ return str;
+} // end of bson_array_add_values
+
+void bson_array_add_values_deinit(UDF_INIT* initid) {
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bson_array_add_values_deinit
+
+/*********************************************************************************/
+/* Add one value to a Json array. */
+/*********************************************************************************/
+my_bool bson_array_add_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ unsigned long reslen, memlen;
+
+ if (args->arg_count < 2) {
+ strcpy(message, "This function must have at least 2 arguments");
+ return true;
+ //} else if (!IsArgJson(args, 0, true)) {
+ // strcpy(message, "First argument is not a valid Json item");
+ // return true;
+ } else
+ CalcLen(args, false, reslen, memlen, true);
+
+ if (!JsonInit(initid, args, message, true, reslen, memlen)) {
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ // This is a constant function
+ g->N = (initid->const_item) ? 1 : 0;
+
+ // This is to avoid double execution when using prepared statements
+ if (IsArgJson(args, 0) > 1)
+ initid->const_item = 0;
+
+ return false;
+ } else
+ return true;
+
+} // end of bson_array_add_init
+
+char *bson_array_add(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *is_null, char *error)
+{
+ char *str = NULL;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ if (g->Xchk) {
+ // This constant function was recalled
+ str = (char*)g->Xchk;
+ goto fin;
+ } // endif Xchk
+
+ if (!CheckMemory(g, initid, args, 2, false, false, true)) {
+ int *x;
+ uint n = 2;
+ BJNX bnx(g, NULL, TYPE_STRING);
+ PBVAL jsp, top;
+ PBVAL arp, jvp = bnx.MakeValue(args, 0, true, &top);
+
+ jsp = jvp;
+ x = GetIntArgPtr(g, args, n);
+
+ if (bnx.CheckPath(g, args, jsp, jvp, 2))
+ PUSH_WARNING(g->Message);
+ else if (jvp) {
+ if (jvp->Type != TYPE_JAR) {
+ if ((arp = bnx.NewVal(TYPE_JAR))) {
+ bnx.AddArrayValue(arp, jvp);
+
+ if (!top)
+ top = arp;
+
+ } // endif arp
+
+ } else
+ arp = jvp;
+
+ if (arp) {
+ bnx.AddArrayValue(arp, bnx.MakeValue(args, 1), x);
+ bnx.SetChanged(true);
+ str = bnx.MakeResult(args, top, n);
+ } else
+ PUSH_WARNING(g->Message);
+
+ } else {
+ PUSH_WARNING("Target is not an array");
+ // if (g->Mrr) *error = 1; (only if no path)
+ } // endif jvp
+
+ } // endif CheckMemory
+
+ // In case of error or file, return unchanged argument
+ if (!str)
+ str = MakePSZ(g, args, 0);
+
+ if (g->N)
+ // Keep result of constant function
+ g->Xchk = str;
+
+fin:
+ if (!str) {
+ *res_length = 0;
+ *is_null = 1;
+ *error = 1;
+ } else
+ *res_length = strlen(str);
+
+ return str;
+} // end of bson_array_add
+
+void bson_array_add_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bson_array_add_deinit
+
+/*********************************************************************************/
+/* Delete a value from a Json array. */
+/*********************************************************************************/
+my_bool bson_array_delete_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ unsigned long reslen, memlen;
+
+ if (args->arg_count < 2) {
+ strcpy(message, "This function must have at least 2 arguments");
+ return true;
+ } else
+ CalcLen(args, false, reslen, memlen, true);
+
+ if (!JsonInit(initid, args, message, true, reslen, memlen)) {
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ // This is a constant function
+ g->N = (initid->const_item) ? 1 : 0;
+
+ // This is to avoid double execution when using prepared statements
+ if (IsArgJson(args, 0) > 1)
+ initid->const_item = 0;
+
+ return false;
+ } else
+ return true;
+
+} // end of bson_array_delete_init
+
+char *bson_array_delete(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *is_null, char *error)
+{
+ char *str = NULL;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ if (g->Xchk) {
+ // This constant function was recalled
+ str = (char*)g->Xchk;
+ goto fin;
+ } // endif Xchk
+
+ if (!CheckMemory(g, initid, args, 1, false, false, true)) {
+ int *x;
+ uint n = 1;
+ BJNX bnx(g, NULL, TYPE_STRING);
+ PBVAL arp, top;
+ PBVAL jvp = bnx.MakeValue(args, 0, true, &top);
+
+ if (!(x = GetIntArgPtr(g, args, n)))
+ PUSH_WARNING("Missing or null array index");
+ else if (bnx.CheckPath(g, args, jvp, arp, 1))
+ PUSH_WARNING(g->Message);
+ else if (arp && arp->Type == TYPE_JAR) {
+ bnx.DeleteValue(arp, *x);
+ bnx.SetChanged(true);
+ str = bnx.MakeResult(args, top, n);
+ } else {
+ PUSH_WARNING("First argument target is not an array");
+ // if (g->Mrr) *error = 1;
+ } // endif jvp
+
+ } // endif CheckMemory
+
+ // In case of error or file, return unchanged argument
+ if (!str)
+ str = MakePSZ(g, args, 0);
+
+ if (g->N)
+ // Keep result of constant function
+ g->Xchk = str;
+
+fin:
+ if (!str) {
+ *is_null = 1;
+ *error = 1;
+ *res_length = 0;
+ } else
+ *res_length = strlen(str);
+
+ return str;
+} // end of bson_array_delete
+
+void bson_array_delete_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bson_array_delete_deinit
+
+/*********************************************************************************/
+/* Make a Json Object containing all the parameters. */
+/*********************************************************************************/
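+// Illustrative usage (indicative): keys are derived from the argument names or
+// aliases (see MakeKey), for instance:
+//   SELECT bson_make_object(56 qty, 3.1416 price, 'machin' truc, NULL garanty);
+// is expected to return something like {"qty":56,"price":3.1416,...}.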
+my_bool bson_make_object_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ unsigned long reslen, memlen;
+
+ CalcLen(args, true, reslen, memlen);
+ return JsonInit(initid, args, message, false, reslen, memlen);
+} // end of bson_make_object_init
+
+char *bson_make_object(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *, char *)
+{
+ char *str = NULL;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ if (!g->Xchk) {
+ if (!CheckMemory(g, initid, args, args->arg_count, false, false, true)) {
+ BJNX bnx(g);
+ PBVAL objp;
+
+ if ((objp = bnx.NewVal(TYPE_JOB))) {
+ for (uint i = 0; i < args->arg_count; i++)
+ bnx.SetKeyValue(objp, bnx.MakeValue(args, i), bnx.MakeKey(args, i));
+
+ str = bnx.Serialize(g, objp, NULL, 0);
+ } // endif objp
+
+ } // endif CheckMemory
+
+ if (!str)
+ str = strcpy(result, g->Message);
+
+ // Keep result of constant function
+ g->Xchk = (initid->const_item) ? str : NULL;
+ } else
+ str = (char*)g->Xchk;
+
+ *res_length = strlen(str);
+ return str;
+} // end of bson_make_object
+
+void bson_make_object_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bson_make_object_deinit
+
+/*********************************************************************************/
+/* Make a Json Object containing all not null parameters. */
+/*********************************************************************************/
+my_bool bson_object_nonull_init(UDF_INIT *initid, UDF_ARGS *args,
+ char *message)
+{
+ unsigned long reslen, memlen;
+
+ CalcLen(args, true, reslen, memlen);
+ return JsonInit(initid, args, message, false, reslen, memlen);
+} // end of bson_object_nonull_init
+
+char *bson_object_nonull(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *, char *)
+{
+ char *str = NULL;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ if (!g->Xchk) {
+ if (!CheckMemory(g, initid, args, args->arg_count, false, true)) {
+ BJNX bnx(g);
+ PBVAL jvp, objp;
+
+ if ((objp = bnx.NewVal(TYPE_JOB))) {
+ for (uint i = 0; i < args->arg_count; i++)
+ if (!bnx.IsValueNull(jvp = bnx.MakeValue(args, i)))
+ bnx.SetKeyValue(objp, jvp, bnx.MakeKey(args, i));
+
+ str = bnx.Serialize(g, objp, NULL, 0);
+ } // endif objp
+
+ } // endif CheckMemory
+
+ if (!str)
+ str = strcpy(result, g->Message);
+
+ // Keep result of constant function
+ g->Xchk = (initid->const_item) ? str : NULL;
+ } else
+ str = (char*)g->Xchk;
+
+ *res_length = strlen(str);
+ return str;
+} // end of bson_object_nonull
+
+void bson_object_nonull_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bson_object_nonull_deinit
+
+/*********************************************************************************/
+/* Make a Json Object containing all the key/value parameters. */
+/*********************************************************************************/
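+// Illustrative usage (indicative): arguments are key/value pairs:
+//   SELECT bson_object_key('qty', 56, 'price', 3.1416);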
+my_bool bson_object_key_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ unsigned long reslen, memlen;
+
+ if (args->arg_count % 2) {
+ strcpy(message, "This function must have an even number of arguments");
+ return true;
+ } // endif arg_count
+
+ CalcLen(args, true, reslen, memlen);
+ return JsonInit(initid, args, message, false, reslen, memlen);
+} // end of bson_object_key_init
+
+char *bson_object_key(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *, char *)
+{
+ char *str = NULL;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ if (!g->Xchk) {
+ if (!CheckMemory(g, initid, args, args->arg_count, false, true)) {
+ BJNX bnx(g);
+ PBVAL objp;
+
+ if ((objp = bnx.NewVal(TYPE_JOB))) {
+ for (uint i = 0; i < args->arg_count; i += 2)
+ bnx.SetKeyValue(objp, bnx.MakeValue(args, i + 1), MakePSZ(g, args, i));
+
+ str = bnx.Serialize(g, objp, NULL, 0);
+ } // endif objp
+
+ } // endif CheckMemory
+
+ if (!str)
+ str = strcpy(result, g->Message);
+
+ // Keep result of constant function
+ g->Xchk = (initid->const_item) ? str : NULL;
+ } else
+ str = (char*)g->Xchk;
+
+ *res_length = strlen(str);
+ return str;
+} // end of bson_object_key
+
+void bson_object_key_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bson_object_key_deinit
+
+/*********************************************************************************/
+/* Add or replace a value in a Json Object. */
+/*********************************************************************************/
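+// Illustrative usage (indicative): the added key comes from the name or alias
+// of the second argument; an optional path may select a nested object:
+//   SELECT bson_object_add('{"item":"T-shirt"}', 99 price);
+//   SELECT bson_object_add('{"a":{"b":1}}', 2 c, '$.a');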
+my_bool bson_object_add_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ unsigned long reslen, memlen;
+
+ if (args->arg_count < 2) {
+ strcpy(message, "This function must have at least 2 arguments");
+ return true;
+ } else if (!IsArgJson(args, 0)) {
+ strcpy(message, "First argument must be a json item");
+ return true;
+ } else
+ CalcLen(args, true, reslen, memlen, true);
+
+ if (!JsonInit(initid, args, message, true, reslen, memlen)) {
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ // This is a constant function
+ g->N = (initid->const_item) ? 1 : 0;
+
+ // This is to avoid double execution when using prepared statements
+ if (IsArgJson(args, 0) > 1)
+ initid->const_item = 0;
+
+ return false;
+ } else
+ return true;
+
+} // end of bson_object_add_init
+
+char *bson_object_add(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *is_null, char *error)
+{
+ PSZ key;
+ char *str = NULL;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ if (g->Xchk) {
+ // This constant function was recalled
+ str = (char*)g->Xchk;
+ goto fin;
+ } // endif Xchk
+
+ if (!CheckMemory(g, initid, args, 2, false, true, true)) {
+ BJNX bnx(g, NULL, TYPE_STRING);
+ PBVAL jvp, objp;
+ PBVAL jsp, top;
+
+ jsp = bnx.MakeValue(args, 0, true, &top);
+
+ if (bnx.CheckPath(g, args, jsp, jvp, 2))
+ PUSH_WARNING(g->Message);
+ else if (jvp && jvp->Type == TYPE_JOB) {
+ objp = jvp;
+ jvp = bnx.MakeValue(args, 1);
+ key = bnx.MakeKey(args, 1);
+ bnx.SetKeyValue(objp, jvp, key);
+ bnx.SetChanged(true);
+ str = bnx.MakeResult(args, top);
+ } else {
+ PUSH_WARNING("First argument target is not an object");
+ // if (g->Mrr) *error = 1; (only if no path)
+ } // endif jvp
+
+ } // endif CheckMemory
+
+ // In case of error or file, return unchanged argument
+ if (!str)
+ str = MakePSZ(g, args, 0);
+
+ if (g->N)
+ // Keep result of constant function
+ g->Xchk = str;
+
+fin:
+ if (!str) {
+ *is_null = 1;
+ *error = 1;
+ *res_length = 0;
+ } else
+ *res_length = strlen(str);
+
+ return str;
+} // end of bson_object_add
+
+void bson_object_add_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bson_object_add_deinit
+
+/*********************************************************************************/
+/* Delete a value from a Json object. */
+/*********************************************************************************/
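+// Illustrative usage (indicative): the second argument is the key to remove;
+// an optional path may select a nested object:
+//   SELECT bson_object_delete('{"a":1,"b":2}', 'b');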
+my_bool bson_object_delete_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ unsigned long reslen, memlen;
+
+ if (args->arg_count < 2) {
+ strcpy(message, "This function must have 2 or 3 arguments");
+ return true;
+ } else if (!IsArgJson(args, 0)) {
+ strcpy(message, "First argument must be a json item");
+ return true;
+ } else if (args->arg_type[1] != STRING_RESULT) {
+ strcpy(message, "Second argument must be a key string");
+ return true;
+ } else
+ CalcLen(args, true, reslen, memlen, true);
+
+ if (!JsonInit(initid, args, message, true, reslen, memlen)) {
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ // This is a constant function
+ g->N = (initid->const_item) ? 1 : 0;
+
+ // This is to avoid double execution when using prepared statements
+ if (IsArgJson(args, 0) > 1)
+ initid->const_item = 0;
+
+ return false;
+ } else
+ return true;
+
+} // end of bson_object_delete_init
+
+char *bson_object_delete(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *is_null, char *error)
+{
+ char *str = NULL;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ if (g->Xchk) {
+ // This constant function was recalled
+ str = (char*)g->Xchk;
+ goto fin;
+ } // endif Xchk
+
+ if (!CheckMemory(g, initid, args, 1, false, true, true)) {
+ bool chg;
+ BJNX bnx(g, NULL, TYPE_STRG);
+ PSZ key;
+ PBVAL jsp, objp, top;
+ PBVAL jvp = bnx.MakeValue(args, 0, false, &top);
+
+ jsp = jvp;
+
+ if (bnx.CheckPath(g, args, jsp, jvp, 2))
+ PUSH_WARNING(g->Message);
+ else if (jvp && jvp->Type == TYPE_JOB) {
+ key = bnx.MakeKey(args, 1);
+ objp = jvp;
+ chg = bnx.DeleteKey(objp, key);
+ bnx.SetChanged(chg);
+ str = bnx.MakeResult(args, top);
+ } else {
+ PUSH_WARNING("First argument target is not an object");
+ // if (g->Mrr) *error = 1; (only if no path)
+ } // endif jvp
+
+ } // endif CheckMemory
+
+ // In case of error or file, return unchanged argument
+ if (!str)
+ str = MakePSZ(g, args, 0);
+
+ if (g->N)
+ // Keep result of constant function
+ g->Xchk = str;
+
+fin:
+ if (!str) {
+ *is_null = 1;
+ *error = 1;
+ *res_length = 0;
+ } else
+ *res_length = strlen(str);
+
+ return str;
+} // end of bson_object_delete
+
+void bson_object_delete_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bson_object_delete_deinit
+
+/*********************************************************************************/
+/* Returns an array of the Json object keys. */
+/*********************************************************************************/
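+// Illustrative usage (indicative):
+//   SELECT bson_object_list('{"a":1,"b":2}');   -- expected to give ["a","b"]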
+my_bool bson_object_list_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ unsigned long reslen, memlen;
+
+ if (args->arg_count != 1) {
+ strcpy(message, "This function must have 1 argument");
+ return true;
+ } else if (!IsArgJson(args, 0) && args->arg_type[0] != STRING_RESULT) {
+ strcpy(message, "Argument must be a json item");
+ return true;
+ } else
+ CalcLen(args, false, reslen, memlen);
+
+ return JsonInit(initid, args, message, true, reslen, memlen);
+} // end of bson_object_list_init
+
+char *bson_object_list(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *is_null, char *error)
+{
+ char *str = NULL;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ if (!g->N) {
+ if (!CheckMemory(g, initid, args, 1, true, true)) {
+ BJNX bnx(g);
+ PBVAL jarp;
+ PBVAL jsp = bnx.MakeValue(args, 0, true);
+
+ if (jsp->Type == TYPE_JOB) {
+ jarp = bnx.GetKeyList(jsp);
+
+ if (!(str = bnx.Serialize(g, jarp, NULL, 0)))
+ PUSH_WARNING(g->Message);
+
+ } else {
+ PUSH_WARNING("First argument is not an object");
+ if (g->Mrr) *error = 1;
+ } // endif jvp
+
+ } // endif CheckMemory
+
+ if (initid->const_item) {
+ // Keep result of constant function
+ g->Xchk = str;
+ g->N = 1; // str can be NULL
+ } // endif const_item
+
+ } else
+ str = (char*)g->Xchk;
+
+ if (!str) {
+ *is_null = 1;
+ *res_length = 0;
+ } else
+ *res_length = strlen(str);
+
+ return str;
+} // end of bson_object_list
+
+void bson_object_list_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bson_object_list_deinit
+
+/*********************************************************************************/
+/* Returns an array of the Json object values. */
+/*********************************************************************************/
+my_bool bson_object_values_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ unsigned long reslen, memlen;
+
+ if (args->arg_count != 1) {
+ strcpy(message, "This function must have 1 argument");
+ return true;
+ } else if (!IsArgJson(args, 0) && args->arg_type[0] != STRING_RESULT) {
+ strcpy(message, "Argument must be a json object");
+ return true;
+ } else
+ CalcLen(args, false, reslen, memlen);
+
+ return JsonInit(initid, args, message, true, reslen, memlen);
+} // end of bson_object_values_init
+
+char *bson_object_values(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *is_null, char *error)
+{
+ char *str = NULL;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ if (!g->N) {
+ if (!CheckMemory(g, initid, args, 1, true, true)) {
+ BJNX bnx(g);
+ char *p;
+ PBVAL jsp, jarp;
+ PBVAL jvp = bnx.MakeValue(args, 0);
+
+ if ((p = bnx.GetString(jvp))) {
+ if (!(jsp = bnx.ParseJson(g, p, strlen(p)))) {
+ PUSH_WARNING(g->Message);
+ return NULL;
+ } // endif jsp
+
+ } else
+ jsp = jvp;
+
+ if (jsp->Type == TYPE_JOB) {
+ jarp = bnx.GetObjectValList(jsp);
+
+ if (!(str = bnx.Serialize(g, jarp, NULL, 0)))
+ PUSH_WARNING(g->Message);
+
+ } else {
+ PUSH_WARNING("First argument is not an object");
+ if (g->Mrr) *error = 1;
+ } // endif jvp
+
+ } // endif CheckMemory
+
+ if (initid->const_item) {
+ // Keep result of constant function
+ g->Xchk = str;
+ g->N = 1; // str can be NULL
+ } // endif const_item
+
+ } else
+ str = (char*)g->Xchk;
+
+ if (!str) {
+ *is_null = 1;
+ *res_length = 0;
+ } else
+ *res_length = strlen(str);
+
+ return str;
+} // end of bson_object_values
+
+void bson_object_values_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bson_object_values_deinit
+
+/*********************************************************************************/
+/* Set the value of JsonGrpSize. */
+/*********************************************************************************/
+my_bool bsonset_def_prec_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ if (args->arg_count != 1 || args->arg_type[0] != INT_RESULT) {
+ strcpy(message, "This function must have 1 integer argument");
+ return true;
+ } else
+ return false;
+
+} // end of bsonset_def_prec_init
+
+long long bsonset_def_prec(UDF_INIT *initid, UDF_ARGS *args, char *, char *)
+{
+ long long n = *(long long*)args->args[0];
+
+ JsonDefPrec = (int)n;
+ return (long long)GetJsonDefPrec();
+} // end of bsonset_def_prec
+
+/*********************************************************************************/
+/* Get the value of JsonGrpSize. */
+/*********************************************************************************/
+my_bool bsonget_def_prec_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ if (args->arg_count != 0) {
+ strcpy(message, "This function must have no arguments");
+ return true;
+ } else
+ return false;
+
+} // end of bsonget_def_prec_init
+
+long long bsonget_def_prec(UDF_INIT *initid, UDF_ARGS *args, char *, char *)
+{
+ return (long long)GetJsonDefPrec();
+} // end of bsonget_def_prec
+
+/*********************************************************************************/
+/* Set the value of JsonGrpSize. */
+/*********************************************************************************/
+my_bool bsonset_grp_size_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ if (args->arg_count != 1 || args->arg_type[0] != INT_RESULT) {
+ strcpy(message, "This function must have 1 integer argument");
+ return true;
+ } else
+ return false;
+
+} // end of bsonset_grp_size_init
+
+long long bsonset_grp_size(UDF_INIT *initid, UDF_ARGS *args, char *, char *)
+{
+ long long n = *(long long*)args->args[0];
+
+ JsonGrpSize = (uint)n;
+ return (long long)GetJsonGroupSize();
+} // end of bsonset_grp_size
+
+/*********************************************************************************/
+/* Get the value of JsonGrpSize. */
+/*********************************************************************************/
+my_bool bsonget_grp_size_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ if (args->arg_count != 0) {
+ strcpy(message, "This function must have no arguments");
+ return true;
+ } else
+ return false;
+
+} // end of bsonget_grp_size_init
+
+long long bsonget_grp_size(UDF_INIT *initid, UDF_ARGS *args, char *, char *)
+{
+ return (long long)GetJsonGroupSize();
+} // end of bsonget_grp_size
+
+/*********************************************************************************/
+/* Make a Json array from values coming from rows. */
+/*********************************************************************************/
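+// Illustrative usage (indicative; table and columns are hypothetical): this is
+// an aggregate function collecting at most json_grp_size values per group:
+//   SELECT dept, bson_array_grp(name) FROM emp GROUP BY dept;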
+my_bool bson_array_grp_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ unsigned long reslen, memlen, n = GetJsonGroupSize();
+
+ if (args->arg_count != 1) {
+ strcpy(message, "This function can only accept 1 argument");
+ return true;
+ } else if (IsArgJson(args, 0) == 3) {
+ strcpy(message, "This function does not support Jbin arguments");
+ return true;
+ } else
+ CalcLen(args, false, reslen, memlen);
+
+ reslen *= n;
+ memlen += ((memlen - MEMFIX) * (n - 1));
+
+ if (JsonInit(initid, args, message, false, reslen, memlen))
+ return true;
+
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+ PBJNX bxp = new(g) BJNX(g);
+
+ JsonMemSave(g);
+ return false;
+} // end of bson_array_grp_init
+
+void bson_array_grp_clear(UDF_INIT *initid, char*, char*)
+{
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+ PBJNX bxp = (PBJNX)((char*)g->Sarea + sizeof(POOLHEADER));
+
+ JsonSubSet(g);
+ g->Activityp = (PACTIVITY)bxp->NewVal(TYPE_JAR);
+ g->N = GetJsonGroupSize();
+} // end of bson_array_grp_clear
+
+void bson_array_grp_add(UDF_INIT *initid, UDF_ARGS *args, char*, char*)
+{
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+ PBJNX bxp = (PBJNX)((char*)g->Sarea + sizeof(POOLHEADER));
+ PBVAL arp = (PBVAL)g->Activityp;
+
+ if (arp && g->N-- > 0)
+ bxp->AddArrayValue(arp, bxp->MakeValue(args, 0));
+
+} // end of bson_array_grp_add
+
+char *bson_array_grp(UDF_INIT *initid, UDF_ARGS *, char *result,
+ unsigned long *res_length, char *, char *)
+{
+ char *str;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+ PBJNX bxp = (PBJNX)((char*)g->Sarea + sizeof(POOLHEADER));
+ PBVAL arp = (PBVAL)g->Activityp;
+
+ if (g->N < 0)
+ PUSH_WARNING("Result truncated to json_grp_size values");
+
+ if (!arp || !(str = bxp->Serialize(g, arp, NULL, 0)))
+ str = strcpy(result, g->Message);
+
+ *res_length = strlen(str);
+ return str;
+} // end of bson_array_grp
+
+void bson_array_grp_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bson_array_grp_deinit
+
+/*********************************************************************************/
+/* Make a Json object from values coming from rows. */
+/*********************************************************************************/
+my_bool bson_object_grp_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ unsigned long reslen, memlen, n = GetJsonGroupSize();
+
+ if (args->arg_count != 2) {
+ strcpy(message, "This function requires 2 arguments (key, value)");
+ return true;
+ } else if (IsArgJson(args, 0) == 3) {
+ strcpy(message, "This function does not support Jbin arguments");
+ return true;
+ } else
+ CalcLen(args, true, reslen, memlen);
+
+ reslen *= n;
+ memlen += ((memlen - MEMFIX) * (n - 1));
+
+ if (JsonInit(initid, args, message, false, reslen, memlen))
+ return true;
+
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+ PBJNX bxp = new(g) BJNX(g);
+
+ JsonMemSave(g);
+ return false;
+} // end of bson_object_grp_init
+
+void bson_object_grp_clear(UDF_INIT *initid, char*, char*)
+{
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+ PBJNX bxp = (PBJNX)((char*)g->Sarea + sizeof(POOLHEADER));
+
+ JsonSubSet(g);
+ g->Activityp = (PACTIVITY)bxp->NewVal(TYPE_JOB);
+ g->N = GetJsonGroupSize();
+} // end of bson_object_grp_clear
+
+void bson_object_grp_add(UDF_INIT *initid, UDF_ARGS *args, char*, char*)
+{
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+ PBJNX bxp = (PBJNX)((char*)g->Sarea + sizeof(POOLHEADER));
+ PBVAL bop = (PBVAL)g->Activityp;
+
+ if (g->N-- > 0)
+ bxp->SetKeyValue(bop, bxp->MakeValue(args, 1), MakePSZ(g, args, 0));
+
+} // end of bson_object_grp_add
+
+char *bson_object_grp(UDF_INIT *initid, UDF_ARGS *, char *result,
+ unsigned long *res_length, char *, char *)
+{
+ char *str;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+ PBJNX bxp = (PBJNX)((char*)g->Sarea + sizeof(POOLHEADER));
+ PBVAL bop = (PBVAL)g->Activityp;
+
+ if (g->N < 0)
+ PUSH_WARNING("Result truncated to json_grp_size values");
+
+ if (!bop || !(str = bxp->Serialize(g, bop, NULL, 0)))
+ str = strcpy(result, g->Message);
+
+ *res_length = strlen(str);
+ return str;
+} // end of bson_object_grp
+
+void bson_object_grp_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bson_object_grp_deinit
+
+/*********************************************************************************/
+/* Test BJSON parse and serialize. */
+/*********************************************************************************/
+my_bool bson_test_init(UDF_INIT* initid, UDF_ARGS* args, char* message) {
+ unsigned long reslen, memlen, more = 1000;
+
+ if (args->arg_count == 0) {
+ strcpy(message, "At least 1 argument required (json)");
+ return true;
+ } else if (!IsArgJson(args, 0) && args->arg_type[0] != STRING_RESULT) {
+ strcpy(message, "First argument must be a json item");
+ return true;
+ } else
+ CalcLen(args, false, reslen, memlen);
+
+ return JsonInit(initid, args, message, true, reslen, memlen, more);
+} // end of bson_test_init
+
+char* bson_test(UDF_INIT* initid, UDF_ARGS* args, char* result,
+ unsigned long* res_length, char* is_null, char* error) {
+ char* str = NULL, * sap = NULL, * fn = NULL;
+ int pretty = 1;
+ PBVAL bvp;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ if (g->N) {
+ str = (char*)g->Activityp;
+ goto err;
+ } else if (initid->const_item)
+ g->N = 1;
+
+ try {
+ BJNX bnx(g);
+
+ if (!g->Xchk) {
+ if (CheckMemory(g, initid, args, 1, !g->Xchk)) {
+ PUSH_WARNING("CheckMemory error");
+ *error = 1;
+ goto err;
+ } else // Sarea may have been reallocated
+ bnx.Reset();
+
+ bvp = bnx.MakeValue(args, 0, true);
+
+ if (bvp->Type == TYPE_NULL) {
+ PUSH_WARNING(g->Message);
+ goto err;
+ } // endif bvp
+
+ if (g->Mrr) { // First argument is a constant
+ g->Xchk = bvp;
+ JsonMemSave(g);
+ } // endif Mrr
+
+ } else
+ bvp = (PBVAL)g->Xchk;
+
+ for (uint i = 1; i < args->arg_count; i++)
+ if (args->arg_type[i] == STRING_RESULT)
+ fn = args->args[i];
+ else if (args->arg_type[i] == INT_RESULT)
+ pretty = (int)*(longlong*)args->args[i];
+
+ // Serialize the parse tree
+ str = bnx.Serialize(g, bvp, fn, pretty);
+
+ if (initid->const_item)
+ // Keep result of constant function
+ g->Activityp = (PACTIVITY)str;
+
+ } catch (int n) {
+ xtrc(1, "json_test_bson: error %d: %s\n", n, g->Message);
+ PUSH_WARNING(g->Message);
+ *error = 1;
+ str = NULL;
+ } catch (const char* msg) {
+ strcpy(g->Message, msg);
+ PUSH_WARNING(g->Message);
+ *error = 1;
+ str = NULL;
+ } // end catch
+
+err:
+ if (!str) {
+ *res_length = 0;
+ *is_null = 1;
+ } else
+ *res_length = strlen(str);
+
+ return str;
+} // end of bson_test
+
+void bson_test_deinit(UDF_INIT* initid) {
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bson_test_deinit
+
+/*********************************************************************************/
+/* Locate a value in a Json tree. */
+/*********************************************************************************/
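+// Illustrative usage (indicative): returns the path of the value, the optional
+// third argument giving the rank of the occurrence to locate:
+//   SELECT bsonlocate('[1,2,[3,4]]', 4);    -- a path such as $[2][1]
+//   SELECT bsonlocate('[1,2,1]', 1, 2);     -- second occurrence of 1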
+my_bool bsonlocate_init(UDF_INIT* initid, UDF_ARGS* args, char* message) {
+ unsigned long reslen, memlen, more = 1000;
+
+ if (args->arg_count < 2) {
+ strcpy(message, "At least 2 arguments required");
+ return true;
+ } else if (!IsArgJson(args, 0) && args->arg_type[0] != STRING_RESULT) {
+ strcpy(message, "First argument must be a json item");
+ return true;
+ } else if (args->arg_count > 2 && args->arg_type[2] != INT_RESULT) {
+ strcpy(message, "Third argument is not an integer (rank)");
+ return true;
+ } // endifs args
+
+ CalcLen(args, false, reslen, memlen);
+
+ // TODO: calculate this
+ if (IsArgJson(args, 0) == 3)
+ more = 0;
+
+ return JsonInit(initid, args, message, true, reslen, memlen, more);
+} // end of bsonlocate_init
+
+char* bsonlocate(UDF_INIT* initid, UDF_ARGS* args, char* result,
+ unsigned long* res_length, char* is_null, char* error) {
+ char *path = NULL;
+ int k;
+ PBVAL bvp, bvp2;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ if (g->N) {
+ if (g->Activityp) {
+ path = (char*)g->Activityp;
+ *res_length = strlen(path);
+ return path;
+ } else {
+ *res_length = 0;
+ *is_null = 1;
+ return NULL;
+ } // endif Activityp
+
+ } else if (initid->const_item)
+ g->N = 1;
+
+ try {
+ BJNX bnx(g);
+
+ if (!g->Xchk) {
+ if (CheckMemory(g, initid, args, 1, !g->Xchk)) {
+ PUSH_WARNING("CheckMemory error");
+ *error = 1;
+ goto err;
+ } else {
+ bnx.Reset(); // Sarea may have been re-allocated
+ bvp = bnx.MakeValue(args, 0, true);
+
+ if (!bvp) {
+ bnx.GetMsg(g);
+ PUSH_WARNING(g->Message);
+ goto err;
+ } else if (bvp->Type == TYPE_NULL) {
+ PUSH_WARNING("First argument is not a valid JSON item");
+ goto err;
+ } // endif bvp
+
+ if (g->Mrr) { // First argument is a constant
+ g->Xchk = bvp;
+ JsonMemSave(g);
+ } // endif Mrr
+
+ } // endif CheckMemory
+
+ } else
+ bvp = (PBVAL)g->Xchk;
+
+ // The item to locate
+ bvp2 = bnx.MakeValue(args, 1, true);
+
+ if (bvp2->Type == TYPE_NULL) {
+ PUSH_WARNING("Invalid second argument");
+ goto err;
+ } // endif bvp
+
+ k = (args->arg_count > 2) ? (int)*(long long*)args->args[2] : 1;
+
+// bnxp = new(g) BJNX(g, bvp, TYPE_STRING);
+ path = bnx.Locate(g, bvp, bvp2, k);
+
+ if (initid->const_item)
+ // Keep result of constant function
+ g->Activityp = (PACTIVITY)path;
+
+ } catch (int n) {
+ xtrc(1, "Exception %d: %s\n", n, g->Message);
+ PUSH_WARNING(g->Message);
+ *error = 1;
+ path = NULL;
+ } catch (const char* msg) {
+ strcpy(g->Message, msg);
+ PUSH_WARNING(g->Message);
+ *error = 1;
+ path = NULL;
+ } // end catch
+
+err:
+ if (!path) {
+ *res_length = 0;
+ *is_null = 1;
+ } else
+ *res_length = strlen(path);
+
+ return path;
+} // end of bsonlocate
+
+void bsonlocate_deinit(UDF_INIT* initid) {
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bsonlocate_deinit
+
+/*********************************************************************************/
+/* Locate all occurrences of a value in a Json tree.                            */
+/*********************************************************************************/
+my_bool bson_locate_all_init(UDF_INIT* initid, UDF_ARGS* args, char* message) {
+ unsigned long reslen, memlen, more = 1000;
+
+ if (args->arg_count < 2) {
+ strcpy(message, "At least 2 arguments required");
+ return true;
+ } else if (!IsArgJson(args, 0) && args->arg_type[0] != STRING_RESULT) {
+ strcpy(message, "First argument must be a json item");
+ return true;
+ } else if (args->arg_count > 2 && args->arg_type[2] != INT_RESULT) {
+ strcpy(message, "Third argument is not an integer (Depth)");
+ return true;
+ } // endifs
+
+ CalcLen(args, false, reslen, memlen);
+
+ // TODO: calculate this
+ if (IsArgJson(args, 0) == 3)
+ more = 0;
+
+ return JsonInit(initid, args, message, true, reslen, memlen, more);
+} // end of bson_locate_all_init
+
+char* bson_locate_all(UDF_INIT* initid, UDF_ARGS* args, char* result,
+ unsigned long* res_length, char* is_null, char* error) {
+ char* path = NULL;
+ int mx = 10;
+ PBVAL bvp, bvp2;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ if (g->N) {
+ if (g->Activityp) {
+ path = (char*)g->Activityp;
+ *res_length = strlen(path);
+ return path;
+ } else {
+ *error = 1;
+ *res_length = 0;
+ *is_null = 1;
+ return NULL;
+ } // endif Activityp
+
+ } else if (initid->const_item)
+ g->N = 1;
+
+ try {
+ BJNX bnx(g);
+
+ if (!g->Xchk) {
+ if (CheckMemory(g, initid, args, 1, true)) {
+ PUSH_WARNING("CheckMemory error");
+ *error = 1;
+ goto err;
+ } else
+ bnx.Reset();
+
+ bvp = bnx.MakeValue(args, 0, true);
+
+ if (bvp->Type == TYPE_NULL) {
+ PUSH_WARNING("First argument is not a valid JSON item");
+ goto err;
+ } // endif bvp
+
+ if (g->Mrr) { // First argument is a constant
+ g->Xchk = bvp;
+ JsonMemSave(g);
+ } // endif Mrr
+
+ } else
+ bvp = (PBVAL)g->Xchk;
+
+ // The item to locate
+ bvp2 = bnx.MakeValue(args, 1, true);
+
+ if (bvp2->Type == TYPE_NULL) {
+ PUSH_WARNING("Invalid second argument");
+ goto err;
+ } // endif bvp
+
+ if (args->arg_count > 2)
+ mx = (int)*(long long*)args->args[2];
+
+// bnxp = new(g) BJNX(g, bvp, TYPE_STRING);
+ path = bnx.LocateAll(g, bvp, bvp2, mx);
+
+ if (initid->const_item)
+ // Keep result of constant function
+ g->Activityp = (PACTIVITY)path;
+
+ } catch (int n) {
+ xtrc(1, "Exception %d: %s\n", n, g->Message);
+ PUSH_WARNING(g->Message);
+ *error = 1;
+ path = NULL;
+ } catch (const char* msg) {
+ strcpy(g->Message, msg);
+ PUSH_WARNING(g->Message);
+ *error = 1;
+ path = NULL;
+ } // end catch
+
+err:
+ if (!path) {
+ *res_length = 0;
+ *is_null = 1;
+ } else
+ *res_length = strlen(path);
+
+ return path;
+} // end of bson_locate_all
+
+void bson_locate_all_deinit(UDF_INIT* initid) {
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bson_locate_all_deinit
+
+/*********************************************************************************/
+/* Check whether the document contains a value or item. */
+/*********************************************************************************/
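+// Illustrative usage (indicative): returns 1 when the value is found in the
+// document, 0 otherwise:
+//   SELECT bson_contains('[1,2,[3,4]]', 4);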
+my_bool bson_contains_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ unsigned long reslen, memlen, more = 1024;
+ int n = IsArgJson(args, 0);
+
+ if (args->arg_count < 2) {
+ strcpy(message, "At least 2 arguments required");
+ return true;
+ } else if (!n && args->arg_type[0] != STRING_RESULT) {
+ strcpy(message, "First argument must be a json item");
+ return true;
+ } else if (args->arg_count > 2 && args->arg_type[2] != INT_RESULT) {
+ strcpy(message, "Third argument is not an integer (index)");
+ return true;
+  } else if (args->arg_count > 3) {
+    if (args->arg_type[3] != INT_RESULT) {
+      strcpy(message, "Fourth argument is not an integer (memory)");
+      return true;
+    } else if (args->args[3])
+      more += (unsigned long)*(long long*)args->args[3];
+
+  } // endif's
+
+ CalcLen(args, false, reslen, memlen);
+ //memlen += more;
+
+ // TODO: calculate this
+ more += (IsArgJson(args, 0) != 3 ? 1000 : 0);
+
+ return JsonInit(initid, args, message, false, reslen, memlen, more);
+} // end of bson_contains_init
+
+long long bson_contains(UDF_INIT *initid, UDF_ARGS *args, char *, char *error)
+{
+ char isn, res[256];
+ unsigned long reslen;
+
+ isn = 0;
+ bsonlocate(initid, args, res, &reslen, &isn, error);
+ return (isn) ? 0LL : 1LL;
+} // end of bson_contains
+
+void bson_contains_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bson_contains_deinit
+
+/*********************************************************************************/
+/* Check whether the document contains a path. */
+/*********************************************************************************/
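+// Illustrative usage (indicative): returns 1 when the path exists in the document:
+//   SELECT bsoncontains_path('{"a":{"b":1}}', '$.a.b');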
+my_bool bsoncontains_path_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ unsigned long reslen, memlen, more = 1024;
+ int n = IsArgJson(args, 0);
+
+ if (args->arg_count < 2) {
+ strcpy(message, "At least 2 arguments required");
+ return true;
+ } else if (!n && args->arg_type[0] != STRING_RESULT) {
+ strcpy(message, "First argument must be a json item");
+ return true;
+ } else if (args->arg_type[1] != STRING_RESULT) {
+ strcpy(message, "Second argument is not a string (path)");
+ return true;
+  } else if (args->arg_count > 2) {
+    if (args->arg_type[2] != INT_RESULT) {
+      strcpy(message, "Third argument is not an integer (memory)");
+      return true;
+    } else if (args->args[2])
+      more += (unsigned long)*(long long*)args->args[2];
+
+  } // endif's
+
+ CalcLen(args, false, reslen, memlen);
+ //memlen += more;
+
+ // TODO: calculate this
+ more += (IsArgJson(args, 0) != 3 ? 1000 : 0);
+
+ return JsonInit(initid, args, message, true, reslen, memlen, more);
+} // end of bsoncontains_path_init
+
+long long bsoncontains_path(UDF_INIT *initid, UDF_ARGS *args, char *, char *error)
+{
+ char *p, *path;
+ long long n;
+ PBVAL jsp;
+ PBVAL jvp;
+ PBJNX bxp = NULL;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ if (g->N) {
+ if (!g->Activityp) {
+ return 0LL;
+ } else
+ return *(long long*)g->Activityp;
+
+ } else if (initid->const_item)
+ g->N = 1;
+
+ if (!g->Xchk) {
+ if (CheckMemory(g, initid, args, 1, true)) {
+ PUSH_WARNING("CheckMemory error");
+ goto err;
+ } else {
+ BJNX bnx(g);
+
+ jvp = bnx.MakeValue(args, 0);
+
+ if ((p = bnx.GetString(jvp))) {
+ if (!(jsp = bnx.ParseJson(g, p, strlen(p)))) {
+ PUSH_WARNING(g->Message);
+ goto err;
+ } // endif jsp
+
+ } else
+ jsp = jvp;
+
+ if (g->Mrr) { // First argument is a constant
+ g->Xchk = jsp;
+ JsonMemSave(g);
+ } // endif Mrr
+
+ } // endelse CheckMemory
+
+ } else
+ jsp = (PBVAL)g->Xchk;
+
+ bxp = new(g) BJNX(g, jsp, TYPE_BIGINT);
+ path = MakePSZ(g, args, 1);
+
+ if (bxp->SetJpath(g, path)) {
+ PUSH_WARNING(g->Message);
+ goto err;
+ } // endif SetJpath
+
+ n = (bxp->CheckPath(g)) ? 1LL : 0LL;
+
+ if (initid->const_item) {
+ // Keep result of constant function
+ long long *np = (long long*)PlgDBSubAlloc(g, NULL, sizeof(long long));
+
+ if (np) {
+ *np = n;
+ g->Activityp = (PACTIVITY)np;
+ } else
+ PUSH_WARNING(g->Message);
+
+ } // endif const_item
+
+ return n;
+
+err:
+ if (g->Mrr) *error = 1;
+ return 0LL;
+} // end of bsoncontains_path
+
+void bsoncontains_path_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bsoncontains_path_deinit
+
+/*********************************************************************************/
+/* Merge two arrays or objects. */
+/*********************************************************************************/
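+// Illustrative usage (indicative): both arguments must be arrays or both objects:
+//   SELECT bson_item_merge('[1,2]', '[3,4]');
+//   SELECT bson_item_merge('{"a":1}', '{"b":2}');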
+my_bool bson_item_merge_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ unsigned long reslen, memlen;
+
+ if (args->arg_count < 2) {
+ strcpy(message, "This function must have at least 2 arguments");
+ return true;
+ } else for (int i = 0; i < 2; i++)
+ if (!IsArgJson(args, i) && args->arg_type[i] != STRING_RESULT) {
+      sprintf(message, "Argument %d must be a json item", i + 1);
+ return true;
+ } // endif type
+
+ CalcLen(args, false, reslen, memlen, true);
+
+ if (!JsonInit(initid, args, message, true, reslen, memlen)) {
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ // This is a constant function
+ g->N = (initid->const_item) ? 1 : 0;
+
+ // This is to avoid double execution when using prepared statements
+ if (IsArgJson(args, 0) > 1)
+ initid->const_item = 0;
+
+ return false;
+ } else
+ return true;
+
+} // end of bson_item_merge_init
+
+char *bson_item_merge(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *is_null, char *error)
+{
+ char *str = NULL;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ if (g->Xchk) {
+ // This constant function was recalled
+ str = (char*)g->Xchk;
+ goto fin;
+ } // endif Xchk
+
+ if (!CheckMemory(g, initid, args, 2, false, false, true)) {
+ JTYP type;
+ BJNX bnx(g);
+ PBVAL jvp, top = NULL;
+ PBVAL jsp[2] = {NULL, NULL};
+
+ for (int i = 0; i < 2; i++) {
+ jvp = bnx.MakeValue(args, i, true);
+
+ if (i) {
+ if (jvp->Type != type) {
+ PUSH_WARNING("Argument types mismatch");
+ goto fin;
+ } // endif type
+
+ } else {
+ type = (JTYP)jvp->Type;
+
+ if (type != TYPE_JAR && type != TYPE_JOB) {
+ PUSH_WARNING("First argument is not an array or object");
+ goto fin;
+ } else
+ top = jvp;
+
+ } // endif i
+
+ jsp[i] = jvp;
+ } // endfor i
+
+ if (type == TYPE_JAR)
+ bnx.MergeArray(jsp[0], jsp[1]);
+ else
+ bnx.MergeObject(jsp[0], jsp[1]);
+
+ bnx.SetChanged(true);
+ str = bnx.MakeResult(args, top);
+ } // endif CheckMemory
+
+ // In case of error or file, return unchanged first argument
+ if (!str)
+ str = MakePSZ(g, args, 0);
+
+ if (g->N)
+ // Keep result of constant function
+ g->Xchk = str;
+
+fin:
+ if (!str) {
+ *res_length = 0;
+ *error = 1;
+ *is_null = 1;
+ } else
+ *res_length = strlen(str);
+
+ return str;
+} // end of bson_item_merge
+
+void bson_item_merge_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bson_item_merge_deinit
+
+/*********************************************************************************/
+/* Get a Json item from a Json document. */
+/*********************************************************************************/
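+// Illustrative usage (indicative): returns the Json item found at the given
+// path (use bsonget_string/int/real for scalar values):
+//   SELECT bson_get_item('{"a":{"b":[1,2]}}', '$.a.b');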
+my_bool bson_get_item_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ unsigned long reslen, memlen, more;
+ int n = IsArgJson(args, 0);
+
+ if (args->arg_count < 2) {
+ strcpy(message, "This function must have at least 2 arguments");
+ return true;
+ } else if (!n && args->arg_type[0] != STRING_RESULT) {
+ strcpy(message, "First argument must be a json item");
+ return true;
+ } else if (args->arg_type[1] != STRING_RESULT) {
+ strcpy(message, "Second argument is not a string (jpath)");
+ return true;
+ } else
+ CalcLen(args, false, reslen, memlen);
+
+ if (n == 2 && args->args[0]) {
+ char fn[_MAX_PATH];
+ long fl;
+
+ memcpy(fn, args->args[0], args->lengths[0]);
+ fn[args->lengths[0]] = 0;
+ fl = GetFileLength(fn);
+ more = fl * 3;
+ } else if (n != 3) {
+ more = args->lengths[0] * 3;
+ } else
+ more = 0;
+
+ return JsonInit(initid, args, message, true, reslen, memlen, more);
+} // end of bson_get_item_init
+
+char *bson_get_item(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *is_null, char *)
+{
+ char *path, *str = NULL;
+ PBVAL jvp;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+ BJNX bnx(g, NULL, TYPE_STRING, initid->max_length);
+
+ if (g->N) {
+ str = (char*)g->Activityp;
+ goto fin;
+ } else if (initid->const_item)
+ g->N = 1;
+
+ if (!g->Xchk) {
+ if (CheckMemory(g, initid, args, 1, true, true)) {
+ PUSH_WARNING("CheckMemory error");
+ goto fin;
+ } else {
+ bnx.Reset();
+ jvp = bnx.MakeValue(args, 0, true);
+
+ if (g->Mrr) { // First argument is a constant
+ g->Xchk = jvp;
+ JsonMemSave(g);
+ } // endif Mrr
+
+ } // endelse CheckMemory
+
+ } else
+ jvp = (PBVAL)g->Xchk;
+
+ path = MakePSZ(g, args, 1);
+
+ if (bnx.SetJpath(g, path, true)) {
+ goto fin;
+ } else
+ jvp = bnx.GetRowValue(g, jvp, 0);
+
+ if (!bnx.IsJson(jvp)) {
+ strcpy(g->Message, "Not a Json item");
+ } else
+ str = bnx.Serialize(g, jvp, NULL, 0);
+
+ if (initid->const_item)
+ // Keep result of constant function
+ g->Activityp = (PACTIVITY)str;
+
+fin:
+ if (!str) {
+ PUSH_WARNING(g->Message);
+ *is_null = 1;
+ *res_length = 0;
+ } else
+ *res_length = strlen(str);
+
+ return str;
+} // end of bson_get_item
+
+void bson_get_item_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bson_get_item_deinit
+
+/*********************************************************************************/
+/* Get a string value from a Json item. */
+/*********************************************************************************/
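+// Illustrative usage (indicative): an optional third integer argument gives
+// extra working memory:
+//   SELECT bsonget_string('{"a":{"b":"hello"}}', '$.a.b');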
+my_bool bsonget_string_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ unsigned long reslen, memlen, more = 1024;
+ int n = IsArgJson(args, 0);
+
+ if (args->arg_count < 2) {
+ strcpy(message, "At least 2 arguments required");
+ return true;
+ } else if (!n && args->arg_type[0] != STRING_RESULT) {
+ strcpy(message, "First argument must be a json item");
+ return true;
+ } else if (args->arg_type[1] != STRING_RESULT) {
+ strcpy(message, "Second argument is not a string (jpath)");
+ return true;
+  } else if (args->arg_count > 2) {
+    if (args->arg_type[2] != INT_RESULT) {
+      strcpy(message, "Third argument is not an integer (memory)");
+      return true;
+    } else if (args->args[2])
+      more += (unsigned long)*(long long*)args->args[2];
+
+  } // endif's
+
+ CalcLen(args, false, reslen, memlen);
+ //memlen += more;
+
+ if (n == 2 && args->args[0]) {
+ char fn[_MAX_PATH];
+ long fl;
+
+ memcpy(fn, args->args[0], args->lengths[0]);
+ fn[args->lengths[0]] = 0;
+ fl = GetFileLength(fn);
+ more += fl * 3;
+ } else if (n != 3)
+ more += args->lengths[0] * 3;
+
+ return JsonInit(initid, args, message, true, reslen, memlen, more);
+} // end of bsonget_string_init
+
+char *bsonget_string(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *is_null, char *)
+{
+ char *p, *path, *str = NULL;
+ PBVAL jsp, jvp;
+ PBJNX bxp = NULL;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ if (g->N) {
+ str = (char*)g->Activityp;
+ goto err;
+ } else if (initid->const_item)
+ g->N = 1;
+
+ try {
+ if (!g->Xchk) {
+ if (CheckMemory(g, initid, args, 1, true)) {
+ PUSH_WARNING("CheckMemory error");
+ goto err;
+ } else {
+ BJNX bnx(g);
+
+ jvp = bnx.MakeValue(args, 0);
+
+ if ((p = bnx.GetString(jvp))) {
+ if (!(jsp = bnx.ParseJson(g, p, strlen(p)))) {
+ PUSH_WARNING(g->Message);
+ goto err;
+ } // endif jsp
+
+ } else
+ jsp = jvp;
+
+ if (g->Mrr) { // First argument is a constant
+ g->Xchk = jsp;
+ JsonMemSave(g);
+ } // endif Mrr
+
+ } // endelse CheckMemory
+
+ } else
+ jsp = (PBVAL)g->Xchk;
+
+ path = MakePSZ(g, args, 1);
+ bxp = new(g) BJNX(g, jsp, TYPE_STRING, initid->max_length);
+
+ if (bxp->SetJpath(g, path)) {
+ PUSH_WARNING(g->Message);
+ goto err;
+ } else
+ bxp->ReadValue(g);
+
+ if (!bxp->GetValue()->IsNull())
+ str = bxp->GetValue()->GetCharValue();
+
+ if (initid->const_item)
+ // Keep result of constant function
+ g->Activityp = (PACTIVITY)str;
+
+ } catch (int n) {
+ if (trace(1))
+ htrc("Exception %d: %s\n", n, g->Message);
+
+ PUSH_WARNING(g->Message);
+ str = NULL;
+ } catch (const char *msg) {
+ strcpy(g->Message, msg);
+ PUSH_WARNING(g->Message);
+ str = NULL;
+ } // end catch
+
+err:
+ if (!str) {
+ *is_null = 1;
+ *res_length = 0;
+ } else
+ *res_length = strlen(str);
+
+ return str;
+} // end of bsonget_string
+
+void bsonget_string_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bsonget_string_deinit
+
+/*********************************************************************************/
+/* Get an integer value from a Json item. */
+/*********************************************************************************/
+my_bool bsonget_int_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ unsigned long reslen, memlen, more;
+
+ if (args->arg_count != 2) {
+ strcpy(message, "This function must have 2 arguments");
+ return true;
+ } else if (!IsArgJson(args, 0) && args->arg_type[0] != STRING_RESULT) {
+ strcpy(message, "First argument must be a json item");
+ return true;
+ } else if (args->arg_type[1] != STRING_RESULT) {
+ strcpy(message, "Second argument is not a (jpath) string");
+ return true;
+ } else
+ CalcLen(args, false, reslen, memlen);
+
+ // TODO: calculate this
+ more = (IsArgJson(args, 0) != 3) ? 1000 : 0;
+
+ return JsonInit(initid, args, message, true, reslen, memlen, more);
+} // end of bsonget_int_init
+
+long long bsonget_int(UDF_INIT *initid, UDF_ARGS *args,
+ char *is_null, char *error)
+{
+ char *p, *path;
+ long long n;
+ PBVAL jsp, jvp;
+ PBJNX bxp = NULL;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ if (g->N) {
+ if (!g->Activityp) {
+ *is_null = 1;
+ return 0LL;
+ } else
+ return *(long long*)g->Activityp;
+
+ } else if (initid->const_item)
+ g->N = 1;
+
+ if (!g->Xchk) {
+ if (CheckMemory(g, initid, args, 1, true)) {
+ PUSH_WARNING("CheckMemory error");
+ if (g->Mrr) *error = 1;
+ *is_null = 1;
+ return 0LL;
+ } else {
+ BJNX bnx(g);
+
+ jvp = bnx.MakeValue(args, 0);
+
+ if ((p = bnx.GetString(jvp))) {
+ if (!(jsp = bnx.ParseJson(g, p, strlen(p)))) {
+ PUSH_WARNING(g->Message);
+ if (g->Mrr) *error = 1;
+ *is_null = 1;
+ return 0;
+ } // endif jsp
+
+ } else
+ jsp = jvp;
+
+ if (g->Mrr) { // First argument is a constant
+ g->Xchk = jsp;
+ JsonMemSave(g);
+ } // endif Mrr
+
+ } // endelse CheckMemory
+
+ } else
+ jsp = (PBVAL)g->Xchk;
+
+ path = MakePSZ(g, args, 1);
+ bxp = new(g) BJNX(g, jsp, TYPE_BIGINT);
+
+ if (bxp->SetJpath(g, path)) {
+ PUSH_WARNING(g->Message);
+ *is_null = 1;
+ return 0;
+ } else
+ bxp->ReadValue(g);
+
+ if (bxp->GetValue()->IsNull()) {
+ *is_null = 1;
+ return 0;
+ } // endif IsNull
+
+ n = bxp->GetValue()->GetBigintValue();
+
+ if (initid->const_item) {
+ // Keep result of constant function
+ long long *np = (long long*)PlgDBSubAlloc(g, NULL, sizeof(long long));
+
+ if (np) {
+ *np = n;
+ g->Activityp = (PACTIVITY)np;
+ } else
+ PUSH_WARNING(g->Message);
+
+ } // endif const_item
+
+ return n;
+} // end of bsonget_int
+
+void bsonget_int_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bsonget_int_deinit
+
+/*********************************************************************************/
+/* Get a double value from a Json item. */
+/*********************************************************************************/
+my_bool bsonget_real_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ unsigned long reslen, memlen, more;
+
+ if (args->arg_count < 2) {
+ strcpy(message, "At least 2 arguments required");
+ return true;
+ } else if (!IsArgJson(args, 0) && args->arg_type[0] != STRING_RESULT) {
+ strcpy(message, "First argument must be a json item");
+ return true;
+ } else if (args->arg_type[1] != STRING_RESULT) {
+ strcpy(message, "Second argument is not a (jpath) string");
+ return true;
+ } else if (args->arg_count > 2) {
+ if (args->arg_type[2] != INT_RESULT) {
+ strcpy(message, "Third argument is not an integer (decimals)");
+ return true;
+ } else
+ initid->decimals = (uint)*(longlong*)args->args[2];
+
+ } else
+ initid->decimals = 15;
+
+ CalcLen(args, false, reslen, memlen);
+
+ // TODO: calculate this
+ more = (IsArgJson(args, 0) != 3) ? 1000 : 0;
+
+ return JsonInit(initid, args, message, true, reslen, memlen, more);
+} // end of bsonget_real_init
+
+double bsonget_real(UDF_INIT *initid, UDF_ARGS *args,
+ char *is_null, char *error)
+{
+ char *p, *path;
+ double d;
+ PBVAL jsp, jvp;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+ BJNX bnx(g);
+
+ if (g->N) {
+ if (!g->Activityp) {
+ *is_null = 1;
+ return 0.0;
+ } else
+ return *(double*)g->Activityp;
+
+ } else if (initid->const_item)
+ g->N = 1;
+
+ if (!g->Xchk) {
+ if (CheckMemory(g, initid, args, 1, true)) {
+ PUSH_WARNING("CheckMemory error");
+ if (g->Mrr) *error = 1;
+ *is_null = 1;
+ return 0.0;
+ } else {
+ bnx.Reset();
+ jvp = bnx.MakeValue(args, 0);
+
+ if ((p = bnx.GetString(jvp))) {
+ if (!(jsp = bnx.ParseJson(g, p, strlen(p)))) {
+ PUSH_WARNING(g->Message);
+ *is_null = 1;
+ return 0.0;
+ } // endif jsp
+
+ } else
+ jsp = jvp;
+
+ if (g->Mrr) { // First argument is a constant
+ g->Xchk = jsp;
+ JsonMemSave(g);
+ } // endif Mrr
+ } // endelse CheckMemory
+
+ } else
+ jsp = (PBVAL)g->Xchk;
+
+ path = MakePSZ(g, args, 1);
+//bxp = new(g) BJNX(g, jsp, TYPE_DOUBLE, 32, jsp->Nd);
+
+ if (bnx.SetJpath(g, path)) {
+ PUSH_WARNING(g->Message);
+ *is_null = 1;
+ return 0.0;
+ } else
+ jvp = bnx.GetRowValue(g, jsp, 0);
+
+ if (!jvp || bnx.IsValueNull(jvp)) {
+ *is_null = 1;
+ return 0.0;
+ } else if (args->arg_count == 2) {
+ d = atof(bnx.GetString(jvp));
+ } else
+ d = bnx.GetDouble(jvp);
+
+ if (initid->const_item) {
+ // Keep result of constant function
+ double *dp;
+
+ if ((dp = (double*)PlgDBSubAlloc(g, NULL, sizeof(double)))) {
+ *dp = d;
+ g->Activityp = (PACTIVITY)dp;
+ } else {
+ PUSH_WARNING(g->Message);
+ *is_null = 1;
+ return 0.0;
+ } // endif dp
+
+ } // endif const_item
+
+ return d;
+} // end of bsonget_real
+
+void bsonget_real_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bsonget_real_deinit
+
+/*********************************************************************************/
+/* Delete items from a Json document. */
+/*********************************************************************************/
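+// Illustrative usage (indicative): each additional argument is a path whose
+// item is deleted from the document:
+//   SELECT bson_delete_item('{"a":1,"b":[1,2]}', '$.b[0]', '$.a');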
+my_bool bson_delete_item_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ unsigned long reslen, memlen;
+
+ if (args->arg_count < 2) {
+ if (IsArgJson(args, 0) != 3) {
+ strcpy(message, "This function must have at least 2 arguments or one binary");
+ return true;
+ } // endif args
+
+ } // endif count
+
+ CalcLen(args, false, reslen, memlen, true);
+
+ if (!JsonInit(initid, args, message, true, reslen, memlen)) {
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+    // This is a constant function
+ g->N = (initid->const_item) ? 1 : 0;
+
+ // This is to avoid double execution when using prepared statements
+ if (IsArgJson(args, 0) > 1)
+ initid->const_item = 0;
+
+ return false;
+ } else
+ return true;
+
+} // end of bson_delete_item_init
+
+char *bson_delete_item(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *is_null, char *error)
+{
+ char *path, *str = NULL;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ if (g->Xchk) {
+ // This constant function was recalled
+ str = (char*)g->Xchk;
+ goto fin;
+ } // endif Xchk
+
+ if (!CheckMemory(g, initid, args, 1, false, false, true)) {
+ BJNX bnx(g, NULL, TYPE_STRING);
+ PBVAL top, jar = NULL;
+ PBVAL jvp = bnx.MakeValue(args, 0, true, &top);
+
+ if (args->arg_count == 1) {
+ // This should be coming from bbin_locate_all
+ jar = jvp; // This is the array of paths
+ jvp = top; // And this is the document
+ } else if(!bnx.IsJson(jvp)) {
+ PUSH_WARNING("First argument is not a JSON document");
+ goto fin;
+ } else if (args->arg_count == 2) {
+ // Check whether this is an array of paths
+ jar = bnx.MakeValue(args, 1, true);
+
+ if (jar && jar->Type != TYPE_JAR)
+ jar = NULL;
+
+ } // endif arg_count
+
+ if (jar) {
+ // Do the deletion in reverse order
+ for(int i = bnx.GetArraySize(jar) - 1; i >= 0; i--) {
+ path = bnx.GetString(bnx.GetArrayValue(jar, i));
+
+ if (bnx.SetJpath(g, path, false)) {
+ PUSH_WARNING(g->Message);
+ continue;
+ } // endif SetJpath
+
+ bnx.SetChanged(bnx.DeleteItem(g, jvp));
+ } // endfor i
+
+ } else for (uint i = 1; i < args->arg_count; i++) {
+ path = MakePSZ(g, args, i);
+
+ if (bnx.SetJpath(g, path, false)) {
+ PUSH_WARNING(g->Message);
+ continue;
+ } // endif SetJpath
+
+ bnx.SetChanged(bnx.DeleteItem(g, jvp));
+ } // endfor i
+
+ str = bnx.MakeResult(args, top, INT_MAX);
+ } // endif CheckMemory
+
+ if (g->N)
+ // Keep result of constant function
+ g->Xchk = str;
+
+fin:
+ if (!str) {
+ *is_null = 1;
+ *error = 1;
+ *res_length = 0;
+ } else
+ *res_length = strlen(str);
+
+ return str;
+} // end of bson_delete_item
+
+void bson_delete_item_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bson_delete_item_deinit
+
+/*********************************************************************************/
+/* This function is used by the json_set/insert/update_item functions. */
+/*********************************************************************************/
+static char *bson_handle_item(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *is_null, char *error)
+{
+ char *path, *str = NULL;
+ int w;
+ my_bool b = true;
+ PBJNX bxp;
+ PBVAL jsp, jvp;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ if (g->Alchecked) {
+ str = (char*)g->Activityp;
+ goto fin;
+ } else if (g->N)
+ g->Alchecked = 1;
+
+ if (!strcmp(result, "$set"))
+ w = 0;
+ else if (!strcmp(result, "$insert"))
+ w = 1;
+ else if (!strcmp(result, "$update"))
+ w = 2;
+ else {
+ PUSH_WARNING("Logical error, please contact CONNECT developer");
+ goto fin;
+ } // endelse
+
+ try {
+ if (!g->Xchk) {
+ if (CheckMemory(g, initid, args, 1, true, false, true)) {
+ PUSH_WARNING("CheckMemory error");
+ throw 1;
+ } else {
+ BJNX bnx(g);
+
+ jsp = bnx.MakeValue(args, 0, true);
+
+ if (g->Mrr) { // First argument is a constant
+ g->Xchk = jsp;
+ JsonMemSave(g);
+ } // endif Mrr
+
+ } // endif CheckMemory
+
+ } else
+ jsp = (PBVAL)g->Xchk;
+
+ bxp = new(g)BJNX(g, jsp, TYPE_STRING, initid->max_length, 0, true);
+
+ for (uint i = 1; i + 1 < args->arg_count; i += 2) {
+ jvp = bxp->MakeValue(args, i);
+ path = MakePSZ(g, args, i + 1);
+
+ if (bxp->SetJpath(g, path, false)) {
+ PUSH_WARNING(g->Message);
+ continue;
+ } // endif SetJpath
+
+ if (w) {
+ bxp->ReadValue(g);
+ b = bxp->GetValue()->IsNull();
+ b = (w == 1) ? b : !b;
+ } // endif w
+
+ if (b && bxp->WriteValue(g, jvp)) {
+ PUSH_WARNING(g->Message);
+ continue;
+ } // endif SetJpath
+
+ bxp->SetChanged(true);
+ } // endfor i
+
+ // In case of error or file, return unchanged argument
+ if (!(str = bxp->MakeResult(args, jsp, INT_MAX32)))
+ str = MakePSZ(g, args, 0);
+
+ if (g->N)
+ // Keep result of constant function
+ g->Activityp = (PACTIVITY)str;
+
+ } catch (int n) {
+ if (trace(1))
+ htrc("Exception %d: %s\n", n, g->Message);
+
+ PUSH_WARNING(g->Message);
+ str = NULL;
+ } catch (const char *msg) {
+ strcpy(g->Message, msg);
+ PUSH_WARNING(g->Message);
+ str = NULL;
+ } // end catch
+
+fin:
+ if (!str) {
+ *is_null = 1;
+ *res_length = 0;
+ } else
+ *res_length = strlen(str);
+
+ return str;
+} // end of bson_handle_item
+
+/*********************************************************************************/
+/* Set Json items of a Json document according to path. */
+/*********************************************************************************/
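+// Illustrative usage (indicative): arguments after the document are value/path
+// pairs; bson_insert_item only adds missing items and bson_update_item only
+// changes existing ones:
+//   SELECT bson_set_item('{"a":1}', 2, '$.b', 9, '$.a');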
+my_bool bson_set_item_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ unsigned long reslen, memlen, more = 0;
+ int n = IsArgJson(args, 0);
+
+ if (!(args->arg_count % 2)) {
+ strcpy(message, "This function must have an odd number of arguments");
+ return true;
+ } else if (!n && args->arg_type[0] != STRING_RESULT) {
+ strcpy(message, "First argument must be a json item");
+ return true;
+ } else
+ CalcLen(args, false, reslen, memlen);
+
+ if (n == 2 && args->args[0]) {
+ char fn[_MAX_PATH];
+ long fl;
+
+ memcpy(fn, args->args[0], args->lengths[0]);
+ fn[args->lengths[0]] = 0;
+ fl = GetFileLength(fn);
+ more += fl * 3;
+ } else if (n != 3)
+ more += args->lengths[0] * 3;
+
+ if (!JsonInit(initid, args, message, true, reslen, memlen, more)) {
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ // This is a constant function
+ g->N = (initid->const_item) ? 1 : 0;
+
+ // This is to avoid double execution when using prepared statements
+ if (IsArgJson(args, 0) > 1)
+ initid->const_item = 0;
+
+ g->Alchecked = 0;
+ return false;
+ } else
+ return true;
+
+} // end of bson_set_item_init
+
+char *bson_set_item(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *is_null, char *p)
+{
+ strcpy(result, "$set");
+ return bson_handle_item(initid, args, result, res_length, is_null, p);
+} // end of bson_set_item
+
+void bson_set_item_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bson_set_item_deinit
+
+/*********************************************************************************/
+/* Insert Json items of a Json document according to path. */
+/*********************************************************************************/
+my_bool bson_insert_item_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ return bson_set_item_init(initid, args, message);
+} // end of bson_insert_item_init
+
+char *bson_insert_item(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *is_null, char *p)
+{
+ strcpy(result, "$insert");
+ return bson_handle_item(initid, args, result, res_length, is_null, p);
+} // end of bson_insert_item
+
+void bson_insert_item_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bson_insert_item_deinit
+
+/*********************************************************************************/
+/* Update Json items of a Json document according to path. */
+/*********************************************************************************/
+my_bool bson_update_item_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ return bson_set_item_init(initid, args, message);
+} // end of bson_update_item_init
+
+char *bson_update_item(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *is_null, char *p)
+{
+ strcpy(result, "$update");
+ return bson_handle_item(initid, args, result, res_length, is_null, p);
+} // end of bson_update_item
+
+void bson_update_item_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bson_update_item_deinit
+
+/*********************************************************************************/
+/* Returns a json file as a json string. */
+/*********************************************************************************/
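+// Illustrative usage (the file name and pretty value below are only examples):
+//   SELECT bson_file('biblio.json', 0);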
+my_bool bson_file_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ unsigned long reslen, memlen, fl, more = 1024;
+
+ if (args->arg_count < 1 || args->arg_count > 4) {
+ strcpy(message, "This function only accepts 1 to 4 arguments");
+ return true;
+ } else if (args->arg_type[0] != STRING_RESULT) {
+ strcpy(message, "First argument must be a string (file name)");
+ return true;
+ } // endif's args[0]
+
+ for (unsigned int i = 1; i < args->arg_count; i++) {
+ if (!(args->arg_type[i] == INT_RESULT || args->arg_type[i] == STRING_RESULT)) {
+ sprintf(message, "Argument %d is not an integer or a string (pretty or path)", i);
+ return true;
+ } // endif arg_type
+
+ // Take care of eventual memory argument
+ if (args->arg_type[i] == INT_RESULT && args->args[i])
+ more += (ulong)*(longlong*)args->args[i];
+
+ } // endfor i
+
+ initid->maybe_null = 1;
+ CalcLen(args, false, reslen, memlen);
+
+ if (args->args[0])
+ fl = GetFileLength(args->args[0]);
+ else
+ fl = 100; // What can be done here?
+
+ reslen += fl;
+
+ if (initid->const_item)
+ more += fl;
+
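+  // With a pretty or path argument the file will be fully parsed, so reserve
+  // roughly M times its length for the parsed tree.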
+ if (args->arg_count > 1)
+ more += fl * M;
+
+ memlen += more;
+ return JsonInit(initid, args, message, true, reslen, memlen);
+} // end of bson_file_init
+
+char *bson_file(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *is_null, char *error)
+{
+ char *fn, *str = NULL;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ if (g->N) {
+ str = (char*)g->Xchk;
+ goto fin;
+ } else if (initid->const_item)
+ g->N = 1;
+
+ PlugSubSet(g->Sarea, g->Sarea_Size);
+ fn = MakePSZ(g, args, 0);
+
+ if (args->arg_count > 1) {
+ int pretty = 3, pty = 3;
+ size_t len;
+ PBVAL jsp, jvp = NULL;
+ BJNX bnx(g);
+
+ for (unsigned int i = 1; i < args->arg_count; i++)
+ if (args->arg_type[i] == INT_RESULT && *(longlong*)args->args[i] < 4) {
+        pretty = (int)*(longlong*)args->args[i];
+ break;
+ } // endif type
+
+ // Parse the json file and allocate its tree structure
+ if (!(jsp = bnx.ParseJsonFile(g, fn, pty, len))) {
+ PUSH_WARNING(g->Message);
+ goto fin;
+ } // endif jsp
+
+ if (pty == 3)
+ PUSH_WARNING("File pretty format cannot be determined");
+ else if (pretty != 3 && pty != pretty)
+ PUSH_WARNING("File pretty format doesn't match the specified pretty value");
+ else if (pretty == 3)
+ pretty = pty;
+
+ // Check whether a path was specified
+ if (bnx.CheckPath(g, args, jsp, jvp, 1)) {
+ PUSH_WARNING(g->Message);
+ goto fin;
+ } else if (jvp)
+ jsp = jvp;
+
+ if (!(str = bnx.Serialize(g, jsp, NULL, 0)))
+ PUSH_WARNING(g->Message);
+
+ } else
+ if (!(str = GetJsonFile(g, fn)))
+ PUSH_WARNING(g->Message);
+
+ if (initid->const_item)
+ // Keep result of constant function
+ g->Xchk = str;
+
+fin:
+ if (!str) {
+ *res_length = 0;
+ *is_null = 1;
+ } else
+ *res_length = strlen(str);
+
+ return str;
+} // end of bson_file
+
+void bson_file_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bson_file_deinit
+
+/*********************************************************************************/
+/* Make a json file from a json item. */
+/*********************************************************************************/
+my_bool bfile_make_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ unsigned long reslen, memlen;
+
+ if (args->arg_count < 1 || args->arg_count > 3) {
+ strcpy(message, "Wrong number of arguments");
+ return true;
+ } else if (!IsArgJson(args, 0) && args->arg_type[0] != STRING_RESULT) {
+ strcpy(message, "First argument must be a json item");
+ return true;
+ } // endif
+
+ CalcLen(args, false, reslen, memlen);
+  memlen = memlen + 5000;  // To take care of non-pretty files
+ return JsonInit(initid, args, message, true, reslen, memlen);
+} // end of bfile_make_init
+
+char *bfile_make(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *is_null, char *)
+{
+ char *p, *str = NULL, *fn = NULL;
+ int n, pretty = 2;
+ PBVAL jsp, jvp;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+ BJNX bnx(g);
+
+ if (g->N) {
+ str = (char*)g->Activityp;
+ goto fin;
+ } else if (initid->const_item)
+ g->N = 1;
+
+ if ((n = IsArgJson(args, 0)) == 3) {
+ // Get default file name and pretty
+ PBSON bsp = (PBSON)args->args[0];
+
+ fn = bsp->Filename;
+ pretty = bsp->Pretty;
+  } else if (n == 2)
+ fn = args->args[0];
+
+ if (!g->Xchk) {
+ if (CheckMemory(g, initid, args, 1, true)) {
+ PUSH_WARNING("CheckMemory error");
+ goto fin;
+ } else
+ bnx.Reset();
+
+ jvp = bnx.MakeValue(args, 0);
+
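+    // A plain string first argument can hold either inline json text or the
+    // name of a file containing the json to be written back.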
+ if (!n && (p = bnx.GetString(jvp))) {
+ if (!strchr("[{ \t\r\n", *p)) {
+ // Is this a file name?
+ if (!(p = GetJsonFile(g, p))) {
+ PUSH_WARNING(g->Message);
+ goto fin;
+ } else
+ fn = bnx.GetString(jvp);
+
+ } // endif p
+
+ if (!(jsp = bnx.ParseJson(g, p, strlen(p)))) {
+ PUSH_WARNING(g->Message);
+ goto fin;
+ } // endif jsp
+
+ bnx.SetValueVal(jvp, jsp);
+ } // endif p
+
+ if (g->Mrr) { // First argument is a constant
+ g->Xchk = jvp;
+ JsonMemSave(g);
+ } // endif Mrr
+
+ } else
+ jvp = (PBVAL)g->Xchk;
+
+ for (uint i = 1; i < args->arg_count; i++)
+ switch (args->arg_type[i]) {
+ case STRING_RESULT:
+ fn = MakePSZ(g, args, i);
+ break;
+ case INT_RESULT:
+ pretty = (int)*(longlong*)args->args[i];
+ break;
+ default:
+ PUSH_WARNING("Unexpected argument type in bfile_make");
+ } // endswitch arg_type
+
+ if (fn) {
+ if (!bnx.Serialize(g, jvp, fn, pretty))
+ PUSH_WARNING(g->Message);
+ } else
+ PUSH_WARNING("Missing file name");
+
+ str = fn;
+
+ if (initid->const_item)
+ // Keep result of constant function
+ g->Activityp = (PACTIVITY)str;
+
+fin:
+ if (!str) {
+ *res_length = 0;
+ *is_null = 1;
+ } else
+ *res_length = strlen(str);
+
+ return str;
+} // end of bfile_make
+
+void bfile_make_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bfile_make_deinit
+
+/*********************************************************************************/
+/* Convert a prettiest Json file to Pretty=0. */
+/*********************************************************************************/
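+// Illustrative usage (the file names and record length below are only examples):
+//   SELECT bfile_convert('bib_pretty.json', 'bib_flat.json', 300);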
+my_bool bfile_convert_init(UDF_INIT* initid, UDF_ARGS* args, char* message) {
+ unsigned long reslen, memlen;
+
+ if (args->arg_count != 3) {
+ strcpy(message, "This function must have 3 arguments");
+ return true;
+ } else if (args->arg_type[2] != INT_RESULT) {
+    strcpy(message, "Third argument must be an integer (LRECL)");
+ return true;
+ } else for (int i = 0; i < 2; i++)
+ if (args->arg_type[i] != STRING_RESULT) {
+      sprintf(message, "Argument %d must be a string (file name)", i+1);
+ return true;
+ } // endif args
+
+ CalcLen(args, false, reslen, memlen);
+ return JsonInit(initid, args, message, true, reslen, memlen);
+} // end of bfile_convert_init
+
+char *bfile_convert(UDF_INIT* initid, UDF_ARGS* args, char* result,
+ unsigned long *res_length, char *is_null, char *error) {
+ char *str, *fn, *ofn;
+ int lrecl = (int)*(longlong*)args->args[2];
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ PlugSubSet(g->Sarea, g->Sarea_Size);
+ fn = MakePSZ(g, args, 0);
+ ofn = MakePSZ(g, args, 1);
+
+ if (!g->Xchk) {
+ JUP* jup = new(g) JUP(g);
+
+ str = jup->UnprettyJsonFile(g, fn, ofn, lrecl);
+ g->Xchk = str;
+ } else
+ str = (char*)g->Xchk;
+
+ if (!str) {
+ PUSH_WARNING(g->Message ? g->Message : "Unexpected error");
+ *is_null = 1;
+ *error = 1;
+ *res_length = 0;
+ } else {
+ strcpy(result, str);
+ *res_length = strlen(str);
+ } // endif str
+
+ return str;
+} // end of bfile_convert
+
+void bfile_convert_deinit(UDF_INIT* initid) {
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bfile_convert_deinit
+
+/*********************************************************************************/
+/* Convert a pretty=0 Json file to binary BJSON. */
+/*********************************************************************************/
+my_bool bfile_bjson_init(UDF_INIT* initid, UDF_ARGS* args, char* message) {
+ unsigned long reslen, memlen;
+
+ if (args->arg_count != 2 && args->arg_count != 3) {
+ strcpy(message, "This function must have 2 or 3 arguments");
+ return true;
+ } else if (args->arg_count == 3 && args->arg_type[2] != INT_RESULT) {
+    strcpy(message, "Third argument must be an integer (LRECL)");
+ return true;
+ } else for (int i = 0; i < 2; i++)
+ if (args->arg_type[i] != STRING_RESULT) {
+      sprintf(message, "Argument %d must be a string (file name)", i + 1);
+ return true;
+ } // endif args
+
+ CalcLen(args, false, reslen, memlen);
+ memlen = memlen * M;
+ memlen += (args->arg_count == 3) ? (ulong)*(longlong*)args->args[2] : 1024;
+ return JsonInit(initid, args, message, false, reslen, memlen);
+} // end of bfile_bjson_init
+
+char *bfile_bjson(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char*, char *error) {
+ char *buf, *str = NULL, fn[_MAX_PATH], ofn[_MAX_PATH];
+ bool loop;
+ ssize_t len, newloc;
+ size_t lrecl, binszp;
+ PBVAL jsp;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+ BDOC doc(g);
+
+ strcpy(fn, MakePSZ(g, args, 0));
+ strcpy(ofn, MakePSZ(g, args, 1));
+
+ if (args->arg_count == 3)
+ lrecl = (size_t)*(longlong*)args->args[2];
+ else
+ lrecl = 1024;
+
+ if (!g->Xchk) {
+ int msgid = MSGID_OPEN_MODE_STRERROR;
+    FILE *fout = NULL;
+    FILE *fin = NULL;
+
+ if (!(fin = global_fopen(g, msgid, fn, "rt")))
+ str = strcpy(result, g->Message);
+ else if (!(fout = global_fopen(g, msgid, ofn, "wb")))
+ str = strcpy(result, g->Message);
+ else if ((buf = (char*)malloc(lrecl))) {
+ try {
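+        // Each pretty=0 input line is parsed separately; the output record is
+        // the sub-allocated size of the parsed document followed by its raw
+        // binary image.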
+ do {
+ loop = false;
+ PlugSubSet(g->Sarea, g->Sarea_Size);
+
+ if (!fgets(buf, lrecl, fin)) {
+ if (!feof(fin)) {
+            sprintf(g->Message, "Error %d reading %zu bytes from %s",
+ errno, lrecl, fn);
+ str = strcpy(result, g->Message);
+ } else
+ str = strcpy(result, ofn);
+
+ } else if ((len = strlen(buf))) {
+ if ((jsp = doc.ParseJson(g, buf, len))) {
+ newloc = (size_t)PlugSubAlloc(g, NULL, 0);
+ binszp = newloc - (size_t)jsp;
+
+ if (fwrite(&binszp, sizeof(binszp), 1, fout) != 1) {
+                sprintf(g->Message, "Error %d writing %zu bytes to %s",
+ errno, sizeof(binszp), ofn);
+ str = strcpy(result, g->Message);
+ } else if (fwrite(jsp, binszp, 1, fout) != 1) {
+                sprintf(g->Message, "Error %d writing %zu bytes to %s",
+ errno, binszp, ofn);
+ str = strcpy(result, g->Message);
+ } else
+ loop = true;
+
+ } else {
+ str = strcpy(result, g->Message);
+ } // endif jsp
+
+ } else
+ loop = true;
+
+ } while (loop);
+
+ } catch (int) {
+ str = strcpy(result, g->Message);
+ } catch (const char* msg) {
+ str = strcpy(result, msg);
+ } // end catch
+
+ free(buf);
+ } else
+ str = strcpy(result, "Buffer malloc failed");
+
+ if (fin) fclose(fin);
+ if (fout) fclose(fout);
+ g->Xchk = str;
+ } else
+ str = (char*)g->Xchk;
+
+ if (!str) {
+ if (g->Message)
+ str = strcpy(result, g->Message);
+ else
+ str = strcpy(result, "Unexpected error");
+
+ } // endif str
+
+ *res_length = strlen(str);
+ return str;
+} // end of bfile_bjson
+
+void bfile_bjson_deinit(UDF_INIT* initid) {
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bfile_bjson_deinit
+
+/*********************************************************************************/
+/* Serialize a Json document. */
+/*********************************************************************************/
+my_bool bson_serialize_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ unsigned long reslen, memlen;
+
+ if (args->args[0] && IsArgJson(args, 0) != 3) {
+ strcpy(message, "Argument must be a Jbin tree");
+ return true;
+ } else
+ CalcLen(args, false, reslen, memlen);
+
+ return JsonInit(initid, args, message, false, reslen, memlen);
+} // end of bson_serialize_init
+
+char *bson_serialize(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *, char *error)
+{
+ char *str;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ if (!g->Xchk) {
+ if (IsArgJson(args, 0) == 3) {
+ PBSON bsp = (PBSON)args->args[0];
+ BJNX bnx(bsp->G);
+ PBVAL bvp = (args->arg_count == 1) ? (PBVAL)bsp->Jsp : (PBVAL)bsp->Top;
+
+// if (!(str = bnx.Serialize(g, bvp, bsp->Filename, bsp->Pretty)))
+ if (!(str = bnx.Serialize(g, bvp, NULL, 0)))
+ str = strcpy(result, g->Message);
+
+ // Keep result of constant function
+ g->Xchk = (initid->const_item) ? str : NULL;
+ } else {
+ // *error = 1;
+ str = strcpy(result, "Argument is not a Jbin tree");
+ } // endif
+
+ } else
+ str = (char*)g->Xchk;
+
+ *res_length = strlen(str);
+ return str;
+} // end of bson_serialize
+
+void bson_serialize_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bson_serialize_deinit
+
+/*********************************************************************************/
+/* Make and return a binary Json array containing all the parameters. */
+/* Note: jvp must be set before arp because it can be a binary argument. */
+/*********************************************************************************/
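+// Note: the bbin_xxx variants return a fixed-size binary BSON descriptor that
+// is mainly meant to be passed to other bson/bbin UDFs rather than displayed.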
+my_bool bbin_make_array_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ unsigned long reslen, memlen;
+
+ CalcLen(args, false, reslen, memlen);
+ return JsonInit(initid, args, message, true, reslen, memlen);
+} // end of bbin_make_array_init
+
+char *bbin_make_array(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *is_null, char *error)
+{
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+ PBSON bsp = NULL;
+
+ if (!g->Xchk) {
+ if (!CheckMemory(g, initid, args, args->arg_count, false)) {
+ BJNX bnx(g);
+ PBVAL jvp = bnx.MakeValue(args, 0);
+ PBVAL arp = bnx.NewVal(TYPE_JAR);
+
+ for (uint i = 0; i < args->arg_count;) {
+ bnx.AddArrayValue(arp, jvp);
+ jvp = bnx.MakeValue(args, ++i);
+ } // endfor i
+
+ if ((bsp = BbinAlloc(bnx.G, initid->max_length, arp))) {
+ strcat(bsp->Msg, " array");
+
+ // Keep result of constant function
+ g->Xchk = (initid->const_item) ? bsp : NULL;
+ } // endif bsp
+
+ } // endif CheckMemory
+
+ } else
+ bsp = (PBSON)g->Xchk;
+
+ if (!bsp) {
+ *is_null = 1;
+ *error = 1;
+ *res_length = 0;
+ } else
+ *res_length = sizeof(BSON);
+
+ return (char*)bsp;
+} // end of bbin_make_array
+
+void bbin_make_array_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bbin_make_array_deinit
+
+/*********************************************************************************/
+/* Add one value to a Json array. */
+/*********************************************************************************/
+my_bool bbin_array_add_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ unsigned long reslen, memlen;
+
+ if (args->arg_count < 2) {
+ strcpy(message, "This function must have at least 2 arguments");
+ return true;
+ } else
+ CalcLen(args, false, reslen, memlen, true);
+
+ if (!JsonInit(initid, args, message, true, reslen, memlen)) {
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ // This is a constant function
+ g->N = (initid->const_item) ? 1 : 0;
+
+ // This is to avoid double execution when using prepared statements
+ if (IsArgJson(args, 0) > 1)
+ initid->const_item = 0;
+
+ return false;
+ } else
+ return true;
+
+} // end of bbin_array_add_init
+
+char *bbin_array_add(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *is_null, char *error)
+{
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+ PBSON bsp = NULL;
+
+ if (g->Xchk) {
+ // This constant function was recalled
+ bsp = (PBSON)g->Xchk;
+ *res_length = sizeof(BSON);
+ return (char*)bsp;
+ } else if (!CheckMemory(g, initid, args, 2, false, false, true)) {
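+    // An optional integer argument, when present, gives the position at which
+    // the new value is inserted in the target array.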
+ uint n = 2;
+ int* x = GetIntArgPtr(g, args, n);
+ BJNX bnx(g, NULL, TYPE_STRING);
+    PBVAL jarp = NULL, top, jvp = NULL;
+ PBVAL jsp = bnx.MakeValue(args, 0, true, &top);
+
+ if (bnx.CheckPath(g, args, jsp, jvp, 2))
+ PUSH_WARNING(g->Message);
+ else if (jvp && jvp->Type != TYPE_JAR) {
+ if ((jarp = bnx.NewVal(TYPE_JAR))) {
+ bnx.AddArrayValue(jarp, jvp);
+
+ if (!top)
+ top = jarp;
+
+ } // endif jarp
+
+ } else
+ jarp = jvp;
+
+ if (jarp) {
+ bnx.AddArrayValue(jarp, bnx.MakeValue(args, 1), x);
+ bnx.SetChanged(true);
+ bsp = bnx.MakeBinResult(args, top, initid->max_length);
+
+ if (initid->const_item)
+ // Keep result of constant function
+ g->Xchk = bsp;
+
+ } else
+ PUSH_WARNING(g->Message);
+
+ } // endif CheckMemory
+
+ if (!bsp) {
+ *res_length = 0;
+ *is_null = 1;
+ *error = 1;
+ } else
+ *res_length = sizeof(BSON);
+
+ return (char*)bsp;
+} // end of bbin_array_add
+
+void bbin_array_add_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bbin_array_add_deinit
+
+/*********************************************************************************/
+/* Add one or several values to a Bson array. */
+/*********************************************************************************/
+my_bool bbin_array_add_values_init(UDF_INIT* initid, UDF_ARGS* args, char* message)
+{
+ return bson_array_add_values_init(initid, args, message);
+} // end of bbin_array_add_values_init
+
+char* bbin_array_add_values(UDF_INIT* initid, UDF_ARGS* args, char* result,
+ unsigned long* res_length, char* is_null, char* error)
+{
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+ PBSON bsp = NULL;
+
+ if (!g->Xchk) {
+ if (!CheckMemory(g, initid, args, args->arg_count, true)) {
+ uint i = 0;
+ BJNX bnx(g);
+ PBVAL arp, top, jvp = NULL;
+ PBVAL bvp = bnx.MakeValue(args, 0, true, &top);
+
+ if (bvp->Type == TYPE_JAR) {
+ arp = bvp;
+ i = 1;
+ } else // First argument is not an array
+ arp = bnx.NewVal(TYPE_JAR);
+
+ for (; i < args->arg_count; i++)
+ bnx.AddArrayValue(arp, bnx.MakeValue(args, i));
+
+ bnx.SetChanged(true);
+ bsp = bnx.MakeBinResult(args, top, initid->max_length);
+ } // endif CheckMemory
+
+ // Keep result of constant function
+ g->Xchk = (g->N) ? bsp : NULL;
+ } else
+ bsp = (PBSON)g->Xchk;
+
+ if (!bsp) {
+ *res_length = 0;
+ *is_null = 1;
+ *error = 1;
+ } else
+ *res_length = sizeof(BSON);
+
+ return (char*)bsp;
+} // end of bbin_array_add_values
+
+void bbin_array_add_values_deinit(UDF_INIT* initid) {
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bbin_array_add_values_deinit
+
+/*********************************************************************************/
+/* Make a Json array from values coming from rows. */
+/*********************************************************************************/
+my_bool bbin_array_grp_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ return bson_array_grp_init(initid, args, message);
+} // end of bbin_array_grp_init
+
+void bbin_array_grp_clear(UDF_INIT *initid, char *a, char *b)
+{
+ bson_array_grp_clear(initid, a, b);
+} // end of bbin_array_grp_clear
+
+void bbin_array_grp_add(UDF_INIT *initid, UDF_ARGS *args, char *a, char *b)
+{
+ bson_array_grp_add(initid, args, a, b);
+} // end of bbin_array_grp_add
+
+char *bbin_array_grp(UDF_INIT *initid, UDF_ARGS *, char *result,
+ unsigned long *res_length, char *is_null, char *error)
+{
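+  // Group accumulation is done by the bson_array_grp_xxx functions; here the
+  // final array is only wrapped in a binary BSON descriptor.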
+ PBSON bsp = NULL;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+ PBVAL arp = (PBVAL)g->Activityp;
+
+ if (g->N < 0)
+ PUSH_WARNING("Result truncated to json_grp_size values");
+
+ if (arp)
+ if ((bsp = BbinAlloc(g, initid->max_length, arp)))
+ strcat(bsp->Msg, " array");
+
+ if (!bsp) {
+ *res_length = 0;
+ *is_null = 1;
+ *error = 1;
+ } else
+ *res_length = sizeof(BSON);
+
+ return (char*)bsp;
+} // end of bbin_array_grp
+
+void bbin_array_grp_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bbin_array_grp_deinit
+
+/*********************************************************************************/
+/* Make a Json object from values coming from rows. */
+/*********************************************************************************/
+my_bool bbin_object_grp_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ return bson_object_grp_init(initid, args, message);
+} // end of bbin_object_grp_init
+
+void bbin_object_grp_clear(UDF_INIT *initid, char *a, char *b)
+{
+ bson_object_grp_clear(initid, a, b);
+} // end of bbin_object_grp_clear
+
+void bbin_object_grp_add(UDF_INIT *initid, UDF_ARGS *args, char *a, char *b)
+{
+ bson_object_grp_add(initid, args, a, b);
+} // end of bbin_object_grp_add
+
+char *bbin_object_grp(UDF_INIT *initid, UDF_ARGS *, char *result,
+ unsigned long *res_length, char *is_null, char *error)
+{
+ PBSON bsp = NULL;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+ PBVAL bop = (PBVAL)g->Activityp;
+
+ if (g->N < 0)
+ PUSH_WARNING("Result truncated to json_grp_size values");
+
+ if (bop)
+ if ((bsp = BbinAlloc(g, initid->max_length, bop)))
+ strcat(bsp->Msg, " object");
+
+ if (!bsp) {
+ *res_length = 0;
+ *is_null = 1;
+ *error = 1;
+ } else
+ *res_length = sizeof(BSON);
+
+ return (char*)bsp;
+} // end of bbin_object_grp
+
+void bbin_object_grp_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bbin_object_grp_deinit
+
+/*********************************************************************************/
+/* Make a Json Object containing all the parameters. */
+/*********************************************************************************/
+my_bool bbin_make_object_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ unsigned long reslen, memlen;
+
+ CalcLen(args, true, reslen, memlen);
+ return JsonInit(initid, args, message, false, reslen, memlen);
+} // end of bbin_make_object_init
+
+char *bbin_make_object(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *is_null, char *error)
+{
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+ PBSON bsp = (PBSON)g->Xchk;
+
+ if (!bsp) {
+ if (!CheckMemory(g, initid, args, args->arg_count, true)) {
+ BJNX bnx(g);
+ PBVAL objp;
+
+ if ((objp = bnx.NewVal(TYPE_JOB))) {
+ for (uint i = 0; i < args->arg_count; i++)
+ bnx.SetKeyValue(objp, bnx.MakeValue(args, i), bnx.MakeKey(args, i));
+
+ if ((bsp = BbinAlloc(bnx.G, initid->max_length, objp))) {
+ strcat(bsp->Msg, " object");
+
+ // Keep result of constant function
+ g->Xchk = (initid->const_item) ? bsp : NULL;
+ } // endif bsp
+
+ } // endif objp
+
+ } // endif CheckMemory
+
+ } // endif Xchk
+
+ if (!bsp) {
+ *is_null = 1;
+ *error = 1;
+ *res_length = 0;
+ } else
+ *res_length = sizeof(BSON);
+
+ return (char*)bsp;
+} // end of bbin_make_object
+
+void bbin_make_object_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bbin_make_object_deinit
+
+/*********************************************************************************/
+/* Make a Json Object containing all not null parameters. */
+/*********************************************************************************/
+my_bool bbin_object_nonull_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ unsigned long reslen, memlen;
+
+ CalcLen(args, true, reslen, memlen);
+ return JsonInit(initid, args, message, true, reslen, memlen);
+} // end of bbin_object_nonull_init
+
+char *bbin_object_nonull(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *is_null, char *error)
+{
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+ PBSON bsp = (PBSON)g->Xchk;
+
+ if (!bsp) {
+ if (!CheckMemory(g, initid, args, args->arg_count, false, true)) {
+ BJNX bnx(g);
+ PBVAL jvp, objp;
+
+ if ((objp = bnx.NewVal(TYPE_JOB))) {
+ for (uint i = 0; i < args->arg_count; i++)
+ if (!bnx.IsValueNull(jvp = bnx.MakeValue(args, i)))
+ bnx.SetKeyValue(objp, jvp, bnx.MakeKey(args, i));
+
+ if ((bsp = BbinAlloc(bnx.G, initid->max_length, objp))) {
+ strcat(bsp->Msg, " object");
+
+ // Keep result of constant function
+ g->Xchk = (initid->const_item) ? bsp : NULL;
+ } // endif bsp
+
+ } // endif objp
+
+ } // endif CheckMemory
+
+ } // endif Xchk
+
+ if (!bsp) {
+ *is_null = 1;
+ *error = 1;
+ *res_length = 0;
+ } else
+ *res_length = sizeof(BSON);
+
+ return (char*)bsp;
+} // end of bbin_object_nonull
+
+void bbin_object_nonull_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bbin_object_nonull_deinit
+
+/*********************************************************************************/
+/* Make a Json Object containing all the key/value parameters. */
+/*********************************************************************************/
+my_bool bbin_object_key_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ unsigned long reslen, memlen;
+
+ if (args->arg_count % 2) {
+ strcpy(message, "This function must have an even number of arguments");
+ return true;
+ } // endif arg_count
+
+ CalcLen(args, true, reslen, memlen);
+ return JsonInit(initid, args, message, true, reslen, memlen);
+} // end of bbin_object_key_init
+
+char *bbin_object_key(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *is_null, char *error)
+{
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+ PBSON bsp = (PBSON)g->Xchk;
+
+ if (!bsp) {
+ if (!CheckMemory(g, initid, args, args->arg_count, false, true)) {
+ BJNX bnx(g);
+ PBVAL objp;
+
+ if ((objp = bnx.NewVal(TYPE_JOB))) {
+ for (uint i = 0; i < args->arg_count; i += 2)
+ bnx.SetKeyValue(objp, bnx.MakeValue(args, i + 1), MakePSZ(g, args, i));
+
+ if ((bsp = BbinAlloc(bnx.G, initid->max_length, objp))) {
+ strcat(bsp->Msg, " object");
+
+ // Keep result of constant function
+ g->Xchk = (initid->const_item) ? bsp : NULL;
+ } // endif bsp
+
+ } // endif objp
+
+ } // endif CheckMemory
+
+ } // endif Xchk
+
+ if (!bsp) {
+ *is_null = 1;
+ *error = 1;
+ *res_length = 0;
+ } else
+ *res_length = sizeof(BSON);
+
+ return (char*)bsp;
+} // end of bbin_object_key
+
+void bbin_object_key_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bbin_object_key_deinit
+
+/*********************************************************************************/
+/* Add or replace a value in a Json Object. */
+/*********************************************************************************/
+my_bool bbin_object_add_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ unsigned long reslen, memlen;
+
+ if (args->arg_count < 2) {
+ strcpy(message, "This function must have at least 2 arguments");
+ return true;
+ } else if (!IsArgJson(args, 0)) {
+ strcpy(message, "First argument must be a json item");
+ return true;
+ } else
+ CalcLen(args, true, reslen, memlen, true);
+
+ return JsonInit(initid, args, message, true, reslen, memlen);
+} // end of bbin_object_add_init
+
+char *bbin_object_add(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *is_null, char *error)
+{
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+ PBSON bsp = NULL;
+
+ if (g->Xchk) {
+ // This constant function was recalled
+ bsp = (PBSON)g->Xchk;
+ *res_length = sizeof(BSON);
+ return (char*)bsp;
+ } else if (!CheckMemory(g, initid, args, 2, false, true, true)) {
+ PSZ key;
+ BJNX bnx(g, NULL, TYPE_STRING);
+ PBVAL top;
+ PBVAL jobp = bnx.MakeValue(args, 0, true, &top);
+ PBVAL jvp = jobp;
+
+ if (bnx.CheckPath(g, args, jvp, jobp, 2))
+ PUSH_WARNING(g->Message);
+ else if (jobp && jobp->Type == TYPE_JOB) {
+ jvp = bnx.MakeValue(args, 1);
+ key = bnx.MakeKey(args, 1);
+ bnx.SetKeyValue(jobp, jvp, key);
+ bnx.SetChanged(true);
+ } else {
+ PUSH_WARNING("First argument target is not an object");
+ // if (g->Mrr) *error = 1; (only if no path)
+ } // endif jobp
+
+ // In case of error unchanged argument will be returned
+ bsp = bnx.MakeBinResult(args, top, initid->max_length);
+
+ if (initid->const_item)
+ // Keep result of constant function
+ g->Xchk = bsp;
+
+ } // endif CheckMemory
+
+ if (!bsp) {
+ *is_null = 1;
+ *error = 1;
+ *res_length = 0;
+ } else
+ *res_length = sizeof(BSON);
+
+ return (char*)bsp;
+} // end of bbin_object_add
+
+void bbin_object_add_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bbin_object_add_deinit
+
+/*********************************************************************************/
+/* Delete a value from a Json array. */
+/*********************************************************************************/
+my_bool bbin_array_delete_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ return bson_array_delete_init(initid, args, message);
+} // end of bbin_array_delete_init
+
+char *bbin_array_delete(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *is_null, char *error)
+{
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+ PBSON bsp = NULL;
+
+ if (g->Xchk) {
+ // This constant function was recalled
+ bsp = (PBSON)g->Xchk;
+ } else if (!CheckMemory(g, initid, args, 1, false, false, true)) {
+ int* x;
+ uint n = 1;
+ BJNX bnx(g);
+ PBVAL arp, top;
+ PBVAL jvp = bnx.MakeValue(args, 0, true, &top);
+
+ if (!(x = GetIntArgPtr(g, args, n)))
+ PUSH_WARNING("Missing or null array index");
+ else if (bnx.CheckPath(g, args, jvp, arp, 1))
+ PUSH_WARNING(g->Message);
+ else if (arp && arp->Type == TYPE_JAR) {
+ bnx.SetChanged(bnx.DeleteValue(arp, *x));
+ bsp = bnx.MakeBinResult(args, top, initid->max_length);
+ } else {
+ PUSH_WARNING("First argument target is not an array");
+ // if (g->Mrr) *error = 1;
+ } // endif jvp
+
+ if (g->N)
+ // Keep result of constant function
+ g->Xchk = bsp;
+
+ } // endif CheckMemory
+
+ if (!bsp) {
+ *is_null = 1;
+ *error = 1;
+ *res_length = 0;
+ } else
+ *res_length = sizeof(BSON);
+
+ return (char*)bsp;
+} // end of bbin_array_delete
+
+void bbin_array_delete_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bbin_array_delete_deinit
+
+/*********************************************************************************/
+/* Delete a value from a Json object. */
+/*********************************************************************************/
+my_bool bbin_object_delete_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ unsigned long reslen, memlen;
+
+ if (args->arg_count < 2) {
+    strcpy(message, "This function must have at least 2 arguments");
+ return true;
+ } else if (!IsArgJson(args, 0)) {
+ strcpy(message, "First argument must be a json item");
+ return true;
+ } else if (args->arg_type[1] != STRING_RESULT) {
+ strcpy(message, "Second argument must be a key string");
+ return true;
+ } else
+ CalcLen(args, true, reslen, memlen, true);
+
+ return JsonInit(initid, args, message, true, reslen, memlen);
+} // end of bbin_object_delete_init
+
+char *bbin_object_delete(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *is_null, char *error)
+{
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+ PBSON bsp = NULL;
+
+ if (g->Xchk) {
+ // This constant function was recalled
+ bsp = (PBSON)g->Xchk;
+ *res_length = sizeof(BSON);
+ return (char*)bsp;
+ } else if (!CheckMemory(g, initid, args, 1, false, true, true)) {
+ PCSZ key;
+ BJNX bnx(g, NULL, TYPE_STRING);
+ PBVAL top;
+ PBVAL jobp = bnx.MakeValue(args, 0, true, &top);
+
+ if (bnx.CheckPath(g, args, top, jobp, 2))
+ PUSH_WARNING(g->Message);
+ else if (jobp && jobp->Type == TYPE_JOB) {
+ key = bnx.MakeKey(args, 1);
+ bnx.SetChanged(bnx.DeleteKey(jobp, key));
+ } else {
+ PUSH_WARNING("First argument target is not an object");
+ // if (g->Mrr) *error = 1; (only if no path)
+ } // endif jvp
+
+ // In case of error unchanged argument will be returned
+ bsp = bnx.MakeBinResult(args, top, initid->max_length);
+
+ if (initid->const_item)
+ // Keep result of constant function
+ g->Xchk = bsp;
+
+ } // endif CheckMemory
+
+ if (!bsp) {
+ *is_null = 1;
+ *error = 1;
+ *res_length = 0;
+ } else
+ *res_length = sizeof(BSON);
+
+ return (char*)bsp;
+} // end of bbin_object_delete
+
+void bbin_object_delete_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bbin_object_delete_deinit
+
+/*********************************************************************************/
+/* Returns an array of the Json object keys. */
+/*********************************************************************************/
+my_bool bbin_object_list_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ return bson_object_list_init(initid, args, message);
+} // end of bbin_object_list_init
+
+char *bbin_object_list(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *is_null, char *error)
+{
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+ PBSON bsp = (PBSON)g->Xchk;
+
+ if (!bsp) {
+ if (!CheckMemory(g, initid, args, 1, true, true)) {
+ BJNX bnx(g);
+ PBVAL top, jarp = NULL;
+ PBVAL jsp = bnx.MakeValue(args, 0, true, &top);
+
+ if (jsp->Type == TYPE_JOB) {
+ jarp = bnx.GetKeyList(jsp);
+ } else {
+ PUSH_WARNING("First argument is not an object");
+ if (g->Mrr) *error = 1;
+ } // endif jsp type
+
+ // In case of error unchanged argument will be returned
+ bsp = bnx.MakeBinResult(args, top, initid->max_length);
+ bsp->Jsp = (PJSON)jarp;
+
+ } // endif CheckMemory
+
+ // Keep result of constant function
+ g->Xchk = (initid->const_item) ? bsp : NULL;
+ } // endif bsp
+
+ if (!bsp) {
+ *is_null = 1;
+ *error = 1;
+ *res_length = 0;
+ } else
+ *res_length = sizeof(BSON);
+
+ return (char*)bsp;
+} // end of bbin_object_list
+
+void bbin_object_list_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bbin_object_list_deinit
+
+/*********************************************************************************/
+/* Returns an array of the Json object values. */
+/*********************************************************************************/
+my_bool bbin_object_values_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ return bson_object_values_init(initid, args, message);
+} // end of bbin_object_values_init
+
+char *bbin_object_values(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *is_null, char *error)
+{
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+ PBSON bsp = (PBSON)g->Xchk;
+
+ if (!bsp) {
+ if (!CheckMemory(g, initid, args, 1, true, true)) {
+ BJNX bnx(g);
+      PBVAL top, jarp = NULL;
+ PBVAL jvp = bnx.MakeValue(args, 0, true, &top);
+
+ if (jvp->Type == TYPE_JOB) {
+ jarp = bnx.GetObjectValList(jvp);
+ } else {
+ PUSH_WARNING("First argument is not an object");
+ if (g->Mrr) *error = 1;
+ } // endif jvp
+
+ // In case of error unchanged argument will be returned
+ bsp = bnx.MakeBinResult(args, top, initid->max_length);
+ bsp->Jsp = (PJSON)jarp;
+
+ } // endif CheckMemory
+
+ if (initid->const_item) {
+ // Keep result of constant function
+ g->Xchk = bsp;
+ } // endif const_item
+
+ } // endif bsp
+
+ if (!bsp) {
+ *is_null = 1;
+ *error = 1;
+ *res_length = 0;
+ } else
+ *res_length = sizeof(BSON);
+
+ return (char*)bsp;
+} // end of bbin_object_values
+
+void bbin_object_values_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bbin_object_values_deinit
+
+/*********************************************************************************/
+/* Get a Json item from a Json document. */
+/*********************************************************************************/
+my_bool bbin_get_item_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ return bson_get_item_init(initid, args, message);
+} // end of bbin_get_item_init
+
+char *bbin_get_item(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *is_null, char *error)
+{
+ PBSON bsp = NULL;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ if (g->Xchk) {
+ bsp = (PBSON)g->Xchk;
+ } else if (!CheckMemory(g, initid, args, 1, true, true)) {
+ char *path = MakePSZ(g, args, 1);
+ BJNX bnx(g, NULL, TYPE_STRING, initid->max_length);
+ PBVAL top, jvp = NULL;
+ PBVAL jsp = bnx.MakeValue(args, 0, true, &top);
+
+ if (bnx.CheckPath(g, args, jsp, jvp, 1))
+ PUSH_WARNING(g->Message);
+ else if (jvp) {
+ bsp = bnx.MakeBinResult(args, top, initid->max_length);
+ bsp->Jsp = (PJSON)jvp;
+
+ if (initid->const_item)
+ // Keep result of constant function
+ g->Xchk = bsp;
+
+ } // endif jvp
+
+ } else
+ PUSH_WARNING("CheckMemory error");
+
+ if (!bsp) {
+ *is_null = 1;
+ *res_length = 0;
+ } else
+ *res_length = sizeof(BSON);
+
+ return (char*)bsp;
+} // end of bbin_get_item
+
+void bbin_get_item_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bbin_get_item_deinit
+
+/*********************************************************************************/
+/* Merge two arrays or objects. */
+/*********************************************************************************/
+my_bool bbin_item_merge_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ return bson_item_merge_init(initid, args, message);
+} // end of bbin_item_merge_init
+
+char *bbin_item_merge(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *is_null, char *error)
+{
+ PBSON bsp = NULL;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ if (g->Xchk) {
+ // This constant function was recalled
+ bsp = (PBSON)g->Xchk;
+ goto fin;
+ } // endif Xchk
+
+ if (!CheckMemory(g, initid, args, 2, false, false, true)) {
+ JTYP type;
+ BJNX bnx(g);
+ PBVAL jvp, top = NULL;
+ PBVAL jsp[2] = {NULL, NULL};
+
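+    // Both arguments must be containers of the same type; the second one is
+    // then merged into the first.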
+ for (int i = 0; i < 2; i++) {
+ if (i) {
+ jvp = bnx.MakeValue(args, i, true);
+
+ if (jvp->Type != type) {
+ PUSH_WARNING("Argument types mismatch");
+ goto fin;
+ } // endif type
+
+ } else {
+ jvp = bnx.MakeValue(args, i, true, &top);
+ type = (JTYP)jvp->Type;
+
+ if (type != TYPE_JAR && type != TYPE_JOB) {
+ PUSH_WARNING("First argument is not an array or object");
+ goto fin;
+ } // endif type
+
+ } // endif i
+
+ jsp[i] = jvp;
+ } // endfor i
+
+ if (type == TYPE_JAR)
+ bnx.MergeArray(jsp[0], jsp[1]);
+ else
+ bnx.MergeObject(jsp[0], jsp[1]);
+
+ bnx.SetChanged(true);
+ bsp = bnx.MakeBinResult(args, top, initid->max_length);
+ } // endif CheckMemory
+
+ if (g->N)
+ // Keep result of constant function
+ g->Xchk = bsp;
+
+fin:
+ if (!bsp) {
+ *res_length = 0;
+ *error = 1;
+ *is_null = 1;
+ } else
+ *res_length = sizeof(BSON);
+
+ return (char*)bsp;
+} // end of bbin_item_merge
+
+void bbin_item_merge_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bbin_item_merge_deinit
+
+/*********************************************************************************/
+/* This function is used by the jbin_set/insert/update_item functions. */
+/*********************************************************************************/
+static char *bbin_handle_item(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *is_null, char *error)
+{
+ char *path;
+ int w;
+ my_bool b = true;
+ PBJNX bxp;
+ PBVAL jsp, jvp, top;
+ PBSON bsp = NULL;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ if (g->Alchecked) {
+ bsp = (PBSON)g->Activityp;
+ goto fin;
+ } else if (g->N)
+ g->Alchecked = 1;
+
+ if (!strcmp(result, "$set"))
+ w = 0;
+ else if (!strcmp(result, "$insert"))
+ w = 1;
+ else if (!strcmp(result, "$update"))
+ w = 2;
+ else {
+ PUSH_WARNING("Logical error, please contact CONNECT developer");
+ goto fin;
+ } // endelse
+
+ try {
+ if (!g->Xchk) {
+ if (CheckMemory(g, initid, args, 1, true, false, true)) {
+ throw 1;
+ } else {
+ BJNX bnx(g);
+
+ jsp = bnx.MakeValue(args, 0, true, &top);
+
+ if (g->Mrr) { // First argument is a constant
+ g->Xchk = jsp;
+ g->More = (size_t)top;
+ JsonMemSave(g);
+ } // endif Mrr
+
+ } // endif CheckMemory
+
+ } else {
+ jsp = (PBVAL)g->Xchk;
+ top = (PBVAL)g->More;
+ } // endif Xchk
+
+ bxp = new(g)BJNX(g, jsp, TYPE_STRING, initid->max_length, 0, true);
+
+ for (uint i = 1; i + 1 < args->arg_count; i += 2) {
+ jvp = bxp->MakeValue(args, i);
+ path = MakePSZ(g, args, i + 1);
+
+ if (bxp->SetJpath(g, path, false))
+ throw 2;
+
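+      // w selects the mode: 0 = set (always write), 1 = insert (write only
+      // when the item does not exist yet), 2 = update (write only when it does).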
+ if (w) {
+ bxp->ReadValue(g);
+ b = bxp->GetValue()->IsNull();
+ b = (w == 1) ? b : !b;
+ } // endif w
+
+ if (b && bxp->WriteValue(g, jvp))
+ throw 3;
+
+ bxp->SetChanged(true);
+ } // endfor i
+
+ if (!(bsp = bxp->MakeBinResult(args, top, initid->max_length)))
+ throw 4;
+
+ if (g->N)
+ // Keep result of constant function
+ g->Activityp = (PACTIVITY)bsp;
+
+ } catch (int n) {
+ if (trace(1))
+ htrc("Exception %d: %s\n", n, g->Message);
+
+ PUSH_WARNING(g->Message);
+ } catch (const char *msg) {
+ strcpy(g->Message, msg);
+ PUSH_WARNING(g->Message);
+ } // end catch
+
+fin:
+ if (!bsp) {
+ *is_null = 1;
+ *res_length = 0;
+ } else
+ *res_length = sizeof(BSON);
+
+ return (char*)bsp;
+} // end of bbin_handle_item
+
+/*********************************************************************************/
+/* Set Json items of a Json document according to path. */
+/*********************************************************************************/
+my_bool bbin_set_item_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ return bson_set_item_init(initid, args, message);
+} // end of bbin_set_item_init
+
+char *bbin_set_item(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *is_null, char *p)
+{
+ strcpy(result, "$set");
+ return bbin_handle_item(initid, args, result, res_length, is_null, p);
+} // end of bbin_set_item
+
+void bbin_set_item_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bbin_set_item_deinit
+
+/*********************************************************************************/
+/* Insert Json items of a Json document according to path. */
+/*********************************************************************************/
+my_bool bbin_insert_item_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ return bson_set_item_init(initid, args, message);
+} // end of bbin_insert_item_init
+
+char *bbin_insert_item(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *is_null, char *p)
+{
+ strcpy(result, "$insert");
+ return bbin_handle_item(initid, args, result, res_length, is_null, p);
+} // end of bbin_insert_item
+
+void bbin_insert_item_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bbin_insert_item_deinit
+
+/*********************************************************************************/
+/* Update Json items of a Json document according to path. */
+/*********************************************************************************/
+my_bool bbin_update_item_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ return bson_set_item_init(initid, args, message);
+} // end of bbin_update_item_init
+
+char *bbin_update_item(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *is_null, char *p)
+{
+ strcpy(result, "$update");
+ return bbin_handle_item(initid, args, result, res_length, is_null, p);
+} // end of bbin_update_item
+
+void bbin_update_item_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bbin_update_item_deinit
+
+/*********************************************************************************/
+/* Delete items from a Json document. */
+/*********************************************************************************/
+my_bool bbin_delete_item_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ return bson_delete_item_init(initid, args, message);
+} // end of bbin_delete_item_init
+
+char *bbin_delete_item(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *is_null, char *error)
+{
+ char *path;
+ PBSON bsp = NULL;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ if (g->Xchk) {
+ // This constant function was recalled
+ bsp = (PBSON)g->Xchk;
+ goto fin;
+ } // endif Xchk
+
+ if (!CheckMemory(g, initid, args, 1, false, false, true)) {
+ BJNX bnx(g, NULL, TYPE_STRING);
+ PBVAL top, jar = NULL;
+ PBVAL jvp = bnx.MakeValue(args, 0, true, &top);
+
+ if (args->arg_count == 1) {
+ // This should be coming from bbin_locate_all
+ jar = jvp; // This is the array of paths
+ jvp = top; // And this is the document
+ } else if(!bnx.IsJson(jvp)) {
+ PUSH_WARNING("First argument is not a JSON document");
+ goto fin;
+ } else if (args->arg_count == 2) {
+ // Check whether this is an array of paths
+ jar = bnx.MakeValue(args, 1, true);
+
+ if (jar && jar->Type != TYPE_JAR)
+ jar = NULL;
+
+ } // endif arg_count
+
+ if (jar) {
+      // Do the deletion in reverse order so the remaining indexes stay valid
+ for(int i = bnx.GetArraySize(jar) - 1; i >= 0; i--) {
+ path = bnx.GetString(bnx.GetArrayValue(jar, i));
+
+ if (bnx.SetJpath(g, path, false)) {
+ PUSH_WARNING(g->Message);
+ continue;
+ } // endif SetJpath
+
+ bnx.SetChanged(bnx.DeleteItem(g, jvp));
+ } // endfor i
+
+ } else for (uint i = 1; i < args->arg_count; i++) {
+ path = MakePSZ(g, args, i);
+
+ if (bnx.SetJpath(g, path, false)) {
+ PUSH_WARNING(g->Message);
+ continue;
+ } // endif SetJpath
+
+ bnx.SetChanged(bnx.DeleteItem(g, jvp));
+ } // endfor i
+
+ bsp = bnx.MakeBinResult(args, top, initid->max_length);
+
+ if (args->arg_count == 1)
+ // Here Jsp was not a sub-item of top
+ bsp->Jsp = (PJSON)top;
+
+ } // endif CheckMemory
+
+ if (g->N)
+ // Keep result of constant function
+ g->Xchk = bsp;
+
+fin:
+ if (!bsp) {
+ *is_null = 1;
+ *error = 1;
+ *res_length = 0;
+ } else
+ *res_length = sizeof(BSON);
+
+ return (char*)bsp;
+} // end of bbin_delete_item
+
+void bbin_delete_item_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bbin_delete_item_deinit
+
+/*********************************************************************************/
+/* Returns a json file as a json binary tree. */
+/*********************************************************************************/
+my_bool bbin_file_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ return bson_file_init(initid, args, message);
+} // end of bbin_file_init
+
+char *bbin_file(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *is_null, char *error)
+{
+ char *fn;
+ int pretty = 3;
+ size_t len = 0;
+ PBVAL jsp, jvp = NULL;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+ BJNX bnx(g);
+ PBSON bsp = (PBSON)g->Xchk;
+
+ if (bsp)
+ goto fin;
+
+ fn = MakePSZ(g, args, 0);
+
+ for (unsigned int i = 1; i < args->arg_count; i++)
+ if (args->arg_type[i] == INT_RESULT && *(longlong*)args->args[i] < 4) {
+      pretty = (int)*(longlong*)args->args[i];
+ break;
+ } // endif type
+
+ // Parse the json file and allocate its tree structure
+ if (!(jsp = bnx.ParseJsonFile(g, fn, pretty, len))) {
+ PUSH_WARNING(g->Message);
+ *error = 1;
+ goto fin;
+ } // endif jsp
+
+// if (pretty == 3)
+// PUSH_WARNING("File pretty format cannot be determined");
+// else if (pretty == 3)
+// pretty = pty;
+
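+  // Wrap the parsed tree in a BSON descriptor, keeping the file name and
+  // pretty value for a later bfile_make call.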
+ if ((bsp = BbinAlloc(bnx.G, len, jsp))) {
+ strcat(bsp->Msg, " file");
+ bsp->Filename = fn;
+ bsp->Pretty = pretty;
+ } else {
+ *error = 1;
+ goto fin;
+ } // endif bsp
+
+ // Check whether a path was specified
+ if (bnx.CheckPath(g, args, jsp, jvp, 1)) {
+ PUSH_WARNING(g->Message);
+ bsp = NULL;
+ goto fin;
+ } else if (jvp)
+ bsp->Jsp = (PJSON)jvp;
+
+ if (initid->const_item)
+ // Keep result of constant function
+ g->Xchk = bsp;
+
+fin:
+ if (!bsp) {
+ *res_length = 0;
+ *is_null = 1;
+ } else
+ *res_length = sizeof(BSON);
+
+ return (char*)bsp;
+} // end of bbin_file
+
+void bbin_file_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bbin_file_deinit
+
+/*********************************************************************************/
+/* Locate all occurrences of a value in a Json tree. */
+/*********************************************************************************/
+my_bool bbin_locate_all_init(UDF_INIT* initid, UDF_ARGS* args, char* message) {
+ return bson_locate_all_init(initid, args, message);
+} // end of bbin_locate_all_init
+
+char* bbin_locate_all(UDF_INIT* initid, UDF_ARGS* args, char* result,
+ unsigned long* res_length, char* is_null, char* error) {
+ char *path = NULL;
+ int mx = 10;
+ PBVAL bvp, bvp2;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+ PBSON bsp = NULL;
+
+ if (g->N) {
+ if (g->Activityp) {
+ bsp = (PBSON)g->Activityp;
+ *res_length = sizeof(BSON);
+ return (char*)bsp;
+ } else {
+ *error = 1;
+ *res_length = 0;
+ *is_null = 1;
+ return NULL;
+ } // endif Activityp
+
+ } else if (initid->const_item)
+ g->N = 1;
+
+ try {
+ PBVAL top = NULL;
+ BJNX bnx(g);
+
+ if (!g->Xchk) {
+ if (CheckMemory(g, initid, args, 1, true)) {
+ PUSH_WARNING("CheckMemory error");
+ *error = 1;
+ goto err;
+ } else
+ bnx.Reset();
+
+ bvp = bnx.MakeValue(args, 0, true, &top);
+
+ if (bvp->Type == TYPE_NULL) {
+ PUSH_WARNING("First argument is not a valid JSON item");
+ goto err;
+ } // endif bvp
+
+ if (g->Mrr) { // First argument is a constant
+ g->Xchk = bvp;
+ g->More = (size_t)top;
+ JsonMemSave(g);
+ } // endif Mrr
+
+ } else {
+ bvp = (PBVAL)g->Xchk;
+ top = (PBVAL)g->More;
+ } // endif Xchk
+
+ // The item to locate
+ bvp2 = bnx.MakeValue(args, 1, true);
+
+ if (bvp2->Type == TYPE_NULL) {
+ PUSH_WARNING("Invalid second argument");
+ goto err;
+ } // endif bvp2
+
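+    // The optional third argument limits the number of reported occurrences
+    // (10 by default).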
+ if (args->arg_count > 2)
+ mx = (int)*(long long*)args->args[2];
+
+ if ((path = bnx.LocateAll(g, bvp, bvp2, mx))) {
+ bsp = bnx.MakeBinResult(args, top, initid->max_length);
+ bsp->Jsp = (PJSON)bnx.ParseJson(g, path, strlen(path));
+ } // endif path
+
+ if (initid->const_item)
+ // Keep result of constant function
+ g->Activityp = (PACTIVITY)bsp;
+
+ } catch (int n) {
+ xtrc(1, "Exception %d: %s\n", n, g->Message);
+ PUSH_WARNING(g->Message);
+ *error = 1;
+ path = NULL;
+ } catch (const char* msg) {
+ strcpy(g->Message, msg);
+ PUSH_WARNING(g->Message);
+ *error = 1;
+ path = NULL;
+ } // end catch
+
+err:
+ if (!bsp) {
+ *res_length = 0;
+ *is_null = 1;
+ } else
+ *res_length = sizeof(BSON);
+
+ return (char*)bsp;
+} // end of bbin_locate_all
+
+void bbin_locate_all_deinit(UDF_INIT* initid) {
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bbin_locate_all_deinit
+
+
diff --git a/storage/connect/bsonudf.h b/storage/connect/bsonudf.h
new file mode 100644
index 00000000000..bbfd1ceed80
--- /dev/null
+++ b/storage/connect/bsonudf.h
@@ -0,0 +1,411 @@
+/******************** bsonudf H Declares Source Code File (.H) *******************/
+/* Name: bsonudf.h Version 1.0 */
+/* */
+/* (C) Copyright to the author Olivier BERTRAND 2020 - 2021 */
+/* */
+/* This file contains the BSON UDF function and class declares. */
+/*********************************************************************************/
+#pragma once
+#include "jsonudf.h"
+#include "bson.h"
+
+#if 0
+#define UDF_EXEC_ARGS \
+ UDF_INIT*, UDF_ARGS*, char*, unsigned long*, char*, char*
+
+// BSON size should be equal on Linux and Windows
+#define BMX 255
+typedef struct BSON* PBSON;
+
+/***********************************************************************/
+/* Structure used to return binary json to Json UDF functions. */
+/***********************************************************************/
+struct BSON {
+ char Msg[BMX + 1];
+ char *Filename;
+ PGLOBAL G;
+ int Pretty;
+ ulong Reslen;
+ my_bool Changed;
+ PJSON Top;
+ PJSON Jsp;
+ PBSON Bsp;
+}; // end of struct BSON
+
+PBSON JbinAlloc(PGLOBAL g, UDF_ARGS* args, ulong len, PJSON jsp);
+
+/*********************************************************************************/
+/* The JSON tree node. Can be an Object or an Array. */
+/*********************************************************************************/
+typedef struct _jnode {
+ PSZ Key; // The key used for object
+ OPVAL Op; // Operator used for this node
+ PVAL CncVal; // To cont value used for OP_CNC
+ PVAL Valp; // The internal array VALUE
+ int Rank; // The rank in array
+ int Rx; // Read row number
+ int Nx; // Next to read row number
+} JNODE, *PJNODE;
+
+/*********************************************************************************/
+/* The JSON utility functions. */
+/*********************************************************************************/
+bool IsNum(PSZ s);
+char *NextChr(PSZ s, char sep);
+char *GetJsonNull(void);
+uint GetJsonGrpSize(void);
+my_bool JsonSubSet(PGLOBAL g, my_bool b = false);
+my_bool CalcLen(UDF_ARGS* args, my_bool obj, unsigned long& reslen,
+ unsigned long& memlen, my_bool mod = false);
+my_bool JsonInit(UDF_INIT* initid, UDF_ARGS* args, char* message, my_bool mbn,
+ unsigned long reslen, unsigned long memlen,
+ unsigned long more = 0);
+my_bool CheckMemory(PGLOBAL g, UDF_INIT* initid, UDF_ARGS* args, uint n,
+ my_bool m, my_bool obj = false, my_bool mod = false);
+PSZ MakePSZ(PGLOBAL g, UDF_ARGS* args, int i);
+int IsArgJson(UDF_ARGS* args, uint i);
+char *GetJsonFile(PGLOBAL g, char* fn);
+
+/*********************************************************************************/
+/* Structure JPN. Used to make the locate path. */
+/*********************************************************************************/
+typedef struct _jpn {
+ int Type;
+ PCSZ Key;
+ int N;
+} JPN, *PJPN;
+
+#endif // 0
+
+/* --------------------------- New Testing BJSON Stuff --------------------------*/
+extern uint JsonGrpSize;
+uint GetJsonGroupSize(void);
+
+
+typedef class BJNX* PBJNX;
+
+/*********************************************************************************/
+/* Class BJNX: BJSON access methods. */
+/*********************************************************************************/
+class BJNX : public BDOC {
+public:
+ // Constructors
+ BJNX(PGLOBAL g);
+ BJNX(PGLOBAL g, PBVAL row, int type, int len = 64, int prec = 0, my_bool wr = false);
+
+ // Implementation
+ int GetPrecision(void) { return Prec; }
+ PVAL GetValue(void) { return Value; }
+ void SetRow(PBVAL vp) { Row = vp; }
+ void SetChanged(my_bool b) { Changed = b; }
+
+ // Methods
+ my_bool SetJpath(PGLOBAL g, char* path, my_bool jb = false);
+ my_bool ParseJpath(PGLOBAL g);
+ void ReadValue(PGLOBAL g);
+ PBVAL GetRowValue(PGLOBAL g, PBVAL row, int i);
+ PBVAL GetJson(PGLOBAL g);
+ my_bool CheckPath(PGLOBAL g);
+ my_bool CheckPath(PGLOBAL g, UDF_ARGS* args, PBVAL jsp, PBVAL& jvp, int n);
+ my_bool WriteValue(PGLOBAL g, PBVAL jvalp);
+ my_bool DeleteItem(PGLOBAL g, PBVAL vlp);
+ char *Locate(PGLOBAL g, PBVAL jsp, PBVAL jvp, int k = 1);
+ char *LocateAll(PGLOBAL g, PBVAL jsp, PBVAL jvp, int mx = 10);
+ PSZ MakeKey(UDF_ARGS* args, int i);
+ PBVAL MakeValue(UDF_ARGS* args, uint i, bool b = false, PBVAL* top = NULL);
+ PBVAL MakeTypedValue(PGLOBAL g, UDF_ARGS* args, uint i,
+ JTYP type, PBVAL* top = NULL);
+ PBVAL ParseJsonFile(PGLOBAL g, char* fn, int& pty, size_t& len);
+ char *MakeResult(UDF_ARGS* args, PBVAL top, uint n = 2);
+ PBSON MakeBinResult(UDF_ARGS* args, PBVAL top, ulong len, int n = 2);
+
+protected:
+ my_bool SetArrayOptions(PGLOBAL g, char* p, int i, PSZ nm);
+ PVAL GetColumnValue(PGLOBAL g, PBVAL row, int i);
+ PVAL ExpandArray(PGLOBAL g, PBVAL arp, int n);
+ PVAL CalculateArray(PGLOBAL g, PBVAL arp, int n);
+ PVAL GetCalcValue(PGLOBAL g, PBVAL bap, int n);
+ PBVAL MakeJson(PGLOBAL g, PBVAL bvp, int n);
+ void SetJsonValue(PGLOBAL g, PVAL vp, PBVAL vlp);
+ PBVAL GetRow(PGLOBAL g);
+ PBVAL MoveVal(PBVAL vlp);
+ PBVAL MoveJson(PBJNX bxp, PBVAL jvp);
+ PBVAL MoveArray(PBJNX bxp, PBVAL jvp);
+ PBVAL MoveObject(PBJNX bxp, PBVAL jvp);
+ PBVAL MoveValue(PBJNX bxp, PBVAL jvp);
+ my_bool CompareValues(PGLOBAL g, PBVAL v1, PBVAL v2);
+ my_bool LocateArray(PGLOBAL g, PBVAL jarp);
+ my_bool LocateObject(PGLOBAL g, PBVAL jobp);
+ my_bool LocateValue(PGLOBAL g, PBVAL jvp);
+ my_bool LocateArrayAll(PGLOBAL g, PBVAL jarp);
+ my_bool LocateObjectAll(PGLOBAL g, PBVAL jobp);
+ my_bool LocateValueAll(PGLOBAL g, PBVAL jvp);
+ my_bool CompareTree(PGLOBAL g, PBVAL jp1, PBVAL jp2);
+ my_bool AddPath(void);
+
+ // Default constructor not to be used
+ BJNX(void) {}
+
+ // Members
+ PBVAL Row;
+ PBVAL Bvalp;
+ PJPN Jpnp;
+ JOUTSTR *Jp;
+ JNODE *Nodes; // The intermediate objects
+ PVAL Value;
+ PVAL MulVal; // To value used by multiple column
+ char *Jpath; // The json path
+ int Buf_Type;
+ int Long;
+ int Prec;
+ int Nod; // The number of intermediate objects
+ int Xnod; // Index of multiple values
+ int K; // Kth item to locate
+ int I; // Index of JPN
+ int Imax; // Max number of JPN's
+ int B; // Index base
+ my_bool Xpd; // True for expandable column
+ my_bool Parsed; // True when parsed
+ my_bool Found; // Item found by locate
+ my_bool Wr; // Write mode
+ my_bool Jb; // Must return json item
+  my_bool Changed;         // True when the contents were modified
+}; // end of class BJNX
+
+extern "C" {
+ DllExport my_bool bson_test_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char* bson_test(UDF_EXEC_ARGS);
+ DllExport void bson_test_deinit(UDF_INIT*);
+
+ DllExport my_bool bsonvalue_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char* bsonvalue(UDF_EXEC_ARGS);
+ DllExport void bsonvalue_deinit(UDF_INIT*);
+
+ DllExport my_bool bson_make_array_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char* bson_make_array(UDF_EXEC_ARGS);
+ DllExport void bson_make_array_deinit(UDF_INIT*);
+
+ DllExport my_bool bson_array_add_values_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char* bson_array_add_values(UDF_EXEC_ARGS);
+ DllExport void bson_array_add_values_deinit(UDF_INIT*);
+
+ DllExport my_bool bson_array_add_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char* bson_array_add(UDF_EXEC_ARGS);
+ DllExport void bson_array_add_deinit(UDF_INIT*);
+
+ DllExport my_bool bson_array_delete_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char* bson_array_delete(UDF_EXEC_ARGS);
+ DllExport void bson_array_delete_deinit(UDF_INIT*);
+
+ DllExport my_bool bsonlocate_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char* bsonlocate(UDF_EXEC_ARGS);
+ DllExport void bsonlocate_deinit(UDF_INIT*);
+
+ DllExport my_bool bson_locate_all_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char* bson_locate_all(UDF_EXEC_ARGS);
+ DllExport void bson_locate_all_deinit(UDF_INIT*);
+
+ DllExport my_bool bson_contains_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport long long bson_contains(UDF_INIT*, UDF_ARGS*, char*, char*);
+ DllExport void bson_contains_deinit(UDF_INIT*);
+
+ DllExport my_bool bsoncontains_path_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport long long bsoncontains_path(UDF_INIT*, UDF_ARGS*, char*, char*);
+ DllExport void bsoncontains_path_deinit(UDF_INIT*);
+
+ DllExport my_bool bson_make_object_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char* bson_make_object(UDF_EXEC_ARGS);
+ DllExport void bson_make_object_deinit(UDF_INIT*);
+
+ DllExport my_bool bson_object_nonull_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char* bson_object_nonull(UDF_EXEC_ARGS);
+ DllExport void bson_object_nonull_deinit(UDF_INIT*);
+
+ DllExport my_bool bson_object_key_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char* bson_object_key(UDF_EXEC_ARGS);
+ DllExport void bson_object_key_deinit(UDF_INIT*);
+
+ DllExport my_bool bson_object_add_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char* bson_object_add(UDF_EXEC_ARGS);
+ DllExport void bson_object_add_deinit(UDF_INIT*);
+
+ DllExport my_bool bson_object_delete_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char* bson_object_delete(UDF_EXEC_ARGS);
+ DllExport void bson_object_delete_deinit(UDF_INIT*);
+
+ DllExport my_bool bson_object_list_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char* bson_object_list(UDF_EXEC_ARGS);
+ DllExport void bson_object_list_deinit(UDF_INIT*);
+
+ DllExport my_bool bson_object_values_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char* bson_object_values(UDF_EXEC_ARGS);
+ DllExport void bson_object_values_deinit(UDF_INIT*);
+
+ DllExport my_bool bson_item_merge_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char* bson_item_merge(UDF_EXEC_ARGS);
+ DllExport void bson_item_merge_deinit(UDF_INIT*);
+
+ DllExport my_bool bson_get_item_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char* bson_get_item(UDF_EXEC_ARGS);
+ DllExport void bson_get_item_deinit(UDF_INIT*);
+
+ DllExport my_bool bsonget_string_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char* bsonget_string(UDF_EXEC_ARGS);
+ DllExport void bsonget_string_deinit(UDF_INIT*);
+
+ DllExport my_bool bsonget_int_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport long long bsonget_int(UDF_INIT*, UDF_ARGS*, char*, char*);
+ DllExport void bsonget_int_deinit(UDF_INIT*);
+
+ DllExport my_bool bsonget_real_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport double bsonget_real(UDF_INIT*, UDF_ARGS*, char*, char*);
+ DllExport void bsonget_real_deinit(UDF_INIT*);
+
+ DllExport my_bool bsonset_def_prec_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport long long bsonset_def_prec(UDF_INIT*, UDF_ARGS*, char*, char*);
+
+ DllExport my_bool bsonget_def_prec_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport long long bsonget_def_prec(UDF_INIT*, UDF_ARGS*, char*, char*);
+
+ DllExport my_bool bsonset_grp_size_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport long long bsonset_grp_size(UDF_INIT*, UDF_ARGS*, char*, char*);
+
+ DllExport my_bool bsonget_grp_size_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport long long bsonget_grp_size(UDF_INIT*, UDF_ARGS*, char*, char*);
+
+ DllExport my_bool bson_array_grp_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport void bson_array_grp_clear(UDF_INIT *, char *, char *);
+ DllExport void bson_array_grp_add(UDF_INIT *, UDF_ARGS *, char *, char *);
+ DllExport char *bson_array_grp(UDF_EXEC_ARGS);
+ DllExport void bson_array_grp_deinit(UDF_INIT*);
+
+ DllExport my_bool bson_object_grp_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport void bson_object_grp_clear(UDF_INIT *, char *, char *);
+ DllExport void bson_object_grp_add(UDF_INIT *, UDF_ARGS *, char *, char *);
+ DllExport char *bson_object_grp(UDF_EXEC_ARGS);
+ DllExport void bson_object_grp_deinit(UDF_INIT*);
+
+ DllExport my_bool bson_delete_item_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char *bson_delete_item(UDF_EXEC_ARGS);
+ DllExport void bson_delete_item_deinit(UDF_INIT*);
+
+ DllExport my_bool bson_set_item_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char *bson_set_item(UDF_EXEC_ARGS);
+ DllExport void bson_set_item_deinit(UDF_INIT*);
+
+ DllExport my_bool bson_insert_item_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char *bson_insert_item(UDF_EXEC_ARGS);
+ DllExport void bson_insert_item_deinit(UDF_INIT*);
+
+ DllExport my_bool bson_update_item_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char *bson_update_item(UDF_EXEC_ARGS);
+ DllExport void bson_update_item_deinit(UDF_INIT*);
+
+ DllExport my_bool bson_file_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char *bson_file(UDF_EXEC_ARGS);
+ DllExport void bson_file_deinit(UDF_INIT*);
+
+ DllExport my_bool bfile_make_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char* bfile_make(UDF_EXEC_ARGS);
+ DllExport void bfile_make_deinit(UDF_INIT*);
+
+ DllExport my_bool bfile_convert_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char* bfile_convert(UDF_EXEC_ARGS);
+ DllExport void bfile_convert_deinit(UDF_INIT*);
+
+ DllExport my_bool bfile_bjson_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char* bfile_bjson(UDF_EXEC_ARGS);
+ DllExport void bfile_bjson_deinit(UDF_INIT*);
+
+ DllExport my_bool bson_serialize_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char *bson_serialize(UDF_EXEC_ARGS);
+ DllExport void bson_serialize_deinit(UDF_INIT*);
+
+ DllExport my_bool bbin_make_array_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char* bbin_make_array(UDF_EXEC_ARGS);
+ DllExport void bbin_make_array_deinit(UDF_INIT*);
+
+ DllExport my_bool bbin_array_add_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char* bbin_array_add(UDF_EXEC_ARGS);
+ DllExport void bbin_array_add_deinit(UDF_INIT*);
+
+ DllExport my_bool bbin_array_add_values_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char* bbin_array_add_values(UDF_EXEC_ARGS);
+ DllExport void bbin_array_add_values_deinit(UDF_INIT*);
+
+ DllExport my_bool bbin_array_delete_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char* bbin_array_delete(UDF_EXEC_ARGS);
+ DllExport void bbin_array_delete_deinit(UDF_INIT*);
+
+ DllExport my_bool bbin_array_grp_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport void bbin_array_grp_clear(UDF_INIT *, char *, char *);
+ DllExport void bbin_array_grp_add(UDF_INIT *, UDF_ARGS *, char *, char *);
+ DllExport char *bbin_array_grp(UDF_EXEC_ARGS);
+ DllExport void bbin_array_grp_deinit(UDF_INIT*);
+
+ DllExport my_bool bbin_object_grp_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport void bbin_object_grp_clear(UDF_INIT *, char *, char *);
+ DllExport void bbin_object_grp_add(UDF_INIT *, UDF_ARGS *, char *, char *);
+ DllExport char *bbin_object_grp(UDF_EXEC_ARGS);
+ DllExport void bbin_object_grp_deinit(UDF_INIT*);
+
+ DllExport my_bool bbin_make_object_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char* bbin_make_object(UDF_EXEC_ARGS);
+ DllExport void bbin_make_object_deinit(UDF_INIT*);
+
+ DllExport my_bool bbin_object_nonull_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char* bbin_object_nonull(UDF_EXEC_ARGS);
+ DllExport void bbin_object_nonull_deinit(UDF_INIT*);
+
+ DllExport my_bool bbin_object_key_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char* bbin_object_key(UDF_EXEC_ARGS);
+ DllExport void bbin_object_key_deinit(UDF_INIT*);
+
+ DllExport my_bool bbin_object_add_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char *bbin_object_add(UDF_EXEC_ARGS);
+ DllExport void bbin_object_add_deinit(UDF_INIT*);
+
+ DllExport my_bool bbin_object_delete_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char *bbin_object_delete(UDF_EXEC_ARGS);
+ DllExport void bbin_object_delete_deinit(UDF_INIT*);
+
+ DllExport my_bool bbin_object_list_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char *bbin_object_list(UDF_EXEC_ARGS);
+ DllExport void bbin_object_list_deinit(UDF_INIT*);
+
+ DllExport my_bool bbin_object_values_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char* bbin_object_values(UDF_EXEC_ARGS);
+ DllExport void bbin_object_values_deinit(UDF_INIT*);
+
+ DllExport my_bool bbin_get_item_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char *bbin_get_item(UDF_EXEC_ARGS);
+ DllExport void bbin_get_item_deinit(UDF_INIT*);
+
+ DllExport my_bool bbin_item_merge_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char* bbin_item_merge(UDF_EXEC_ARGS);
+ DllExport void bbin_item_merge_deinit(UDF_INIT*);
+
+ DllExport my_bool bbin_set_item_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char *bbin_set_item(UDF_EXEC_ARGS);
+ DllExport void bbin_set_item_deinit(UDF_INIT*);
+
+ DllExport my_bool bbin_insert_item_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char *bbin_insert_item(UDF_EXEC_ARGS);
+ DllExport void bbin_insert_item_deinit(UDF_INIT*);
+
+ DllExport my_bool bbin_update_item_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char *bbin_update_item(UDF_EXEC_ARGS);
+ DllExport void bbin_update_item_deinit(UDF_INIT*);
+
+ DllExport my_bool bbin_delete_item_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char *bbin_delete_item(UDF_EXEC_ARGS);
+ DllExport void bbin_delete_item_deinit(UDF_INIT*);
+
+ DllExport my_bool bbin_locate_all_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char* bbin_locate_all(UDF_EXEC_ARGS);
+ DllExport void bbin_locate_all_deinit(UDF_INIT*);
+
+ DllExport my_bool bbin_file_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char *bbin_file(UDF_EXEC_ARGS);
+ DllExport void bbin_file_deinit(UDF_INIT*);
+} // extern "C"
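+
+// Editorial note: the declarations above all follow the standard MariaDB UDF
+// convention: an *_init function that checks arguments and prepares state, the
+// worker function that produces the result, and an *_deinit that releases what
+// init allocated (UDF_EXEC_ARGS is assumed here to expand to the usual
+// string-UDF parameter list). A minimal sketch of such a trio, using a
+// hypothetical bson_noop function that is NOT part of this header:
+//
+//   // Hypothetical illustration only: a string UDF that echoes its argument.
+//   // (Assumes the usual MariaDB UDF declarations, e.g. from mysql.h, are visible.)
+//   extern "C" my_bool bson_noop_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+//   {
+//     if (args->arg_count != 1 || args->arg_type[0] != STRING_RESULT) {
+//       strcpy(message, "bson_noop expects one string argument");
+//       return 1;                          // non-zero makes the call fail
+//     } // endif args
+//
+//     initid->maybe_null = 1;              // result may be NULL
+//     return 0;
+//   } // end of bson_noop_init
+//
+//   extern "C" char *bson_noop(UDF_INIT *, UDF_ARGS *args, char *,
+//                              unsigned long *res_length, char *is_null, char *)
+//   {
+//     // A real UDF would parse args->args[0] as JSON/BSON here.
+//     *is_null = (args->args[0] == NULL);
+//     *res_length = *is_null ? 0 : args->lengths[0];
+//     return args->args[0];
+//   } // end of bson_noop
+//
+//   extern "C" void bson_noop_deinit(UDF_INIT *)
+//   {
+//   } // end of bson_noop_deinit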
diff --git a/storage/connect/cmgfam.cpp b/storage/connect/cmgfam.cpp
index 579b5b919a7..690c087c2bb 100644
--- a/storage/connect/cmgfam.cpp
+++ b/storage/connect/cmgfam.cpp
@@ -1,11 +1,11 @@
/************** CMGFAM C++ Program Source Code File (.CPP) *************/
/* PROGRAM NAME: cmgfam.cpp */
/* ------------- */
-/* Version 1.4 */
+/* Version 1.5 */
/* */
/* COPYRIGHT: */
/* ---------- */
-/* (C) Copyright to the author Olivier BERTRAND 20017 */
+/*  (C) Copyright to the author Olivier BERTRAND          2017 - 2020   */
/* */
/* WHAT THIS PROGRAM DOES: */
/* ----------------------- */
@@ -29,7 +29,11 @@
#include "reldef.h"
#include "filamtxt.h"
#include "tabdos.h"
+#if defined(BSON_SUPPORT)
+#include "tabbson.h"
+#else
#include "tabjson.h"
+#endif // BSON_SUPPORT
#include "cmgfam.h"
#if defined(UNIX) || defined(UNIV_LINUX)
@@ -53,6 +57,7 @@ CMGFAM::CMGFAM(PJDEF tdp) : DOSFAM((PDOSDEF)NULL)
Pcg.Options = tdp->Options;
Pcg.Filter = tdp->Filter;
Pcg.Pipe = tdp->Pipe && tdp->Options != NULL;
+ Lrecl = tdp->Lrecl + tdp->Ending;
} else {
Pcg.Uristr = NULL;
Pcg.Db_name = NULL;
@@ -60,21 +65,55 @@ CMGFAM::CMGFAM(PJDEF tdp) : DOSFAM((PDOSDEF)NULL)
Pcg.Options = NULL;
Pcg.Filter = NULL;
Pcg.Pipe = false;
+ Lrecl = 0;
} // endif tdp
To_Fbt = NULL;
Mode = MODE_ANY;
Done = false;
- Lrecl = tdp->Lrecl + tdp->Ending;
} // end of CMGFAM standard constructor
- CMGFAM::CMGFAM(PCMGFAM tdfp) : DOSFAM(tdfp)
+#if defined(BSON_SUPPORT)
+ /***********************************************************************/
+/* Constructors. */
+/***********************************************************************/
+CMGFAM::CMGFAM(PBDEF tdp) : DOSFAM((PDOSDEF)NULL)
+{
+ Cmgp = NULL;
+ Pcg.Tdbp = NULL;
+
+ if (tdp) {
+ Pcg.Uristr = tdp->Uri;
+ Pcg.Db_name = tdp->Schema;
+ Pcg.Coll_name = tdp->Collname;
+ Pcg.Options = tdp->Options;
+ Pcg.Filter = tdp->Filter;
+ Pcg.Pipe = tdp->Pipe && tdp->Options != NULL;
+ Lrecl = tdp->Lrecl + tdp->Ending;
+ } else {
+ Pcg.Uristr = NULL;
+ Pcg.Db_name = NULL;
+ Pcg.Coll_name = NULL;
+ Pcg.Options = NULL;
+ Pcg.Filter = NULL;
+ Pcg.Pipe = false;
+ Lrecl = 0;
+ } // endif tdp
+
+ To_Fbt = NULL;
+ Mode = MODE_ANY;
+ Done = false;
+} // end of CMGFAM standard constructor
+#endif // BSON_SUPPORT
+
+CMGFAM::CMGFAM(PCMGFAM tdfp) : DOSFAM(tdfp)
{
+ Cmgp = tdfp->Cmgp;
Pcg = tdfp->Pcg;
To_Fbt = tdfp->To_Fbt;
Mode = tdfp->Mode;
Done = tdfp->Done;
- } // end of CMGFAM copy constructor
+} // end of CMGFAM copy constructor
/***********************************************************************/
/* Reset: reset position values at the beginning of file. */
diff --git a/storage/connect/cmgfam.h b/storage/connect/cmgfam.h
index 7571f5c5309..9c5f91f0d23 100644
--- a/storage/connect/cmgfam.h
+++ b/storage/connect/cmgfam.h
@@ -1,7 +1,7 @@
/*************** CMGFam H Declares Source Code File (.H) ***************/
-/* Name: cmgfam.h Version 1.5 */
+/* Name: cmgfam.h Version 1.6 */
/* */
-/* (C) Copyright to the author Olivier BERTRAND 2017 */
+/* (C) Copyright to the author Olivier BERTRAND 2017 - 2020 */
/* */
/* This file contains the MongoDB access method classes declares. */
/***********************************************************************/
@@ -20,6 +20,9 @@ class DllExport CMGFAM : public DOSFAM {
public:
// Constructor
CMGFAM(PJDEF tdp);
+#if defined(BSON_SUPPORT)
+ CMGFAM(PBDEF tdp);
+#endif // BSON_SUPPORT
CMGFAM(PCMGFAM txfp);
// Implementation
diff --git a/storage/connect/colblk.cpp b/storage/connect/colblk.cpp
index 242e68b5905..e42d9703ad7 100644
--- a/storage/connect/colblk.cpp
+++ b/storage/connect/colblk.cpp
@@ -79,8 +79,7 @@ COLBLK::COLBLK(PCOL col1, PTDB tdbp)
if (trace(2))
htrc(" copying COLBLK %s from %p to %p\n", Name, col1, this);
- if (tdbp)
- {
+ if (tdbp) {
// Attach the new column to the table block
if (!tdbp->GetColumns())
tdbp->SetColumns(this);
@@ -90,6 +89,7 @@ COLBLK::COLBLK(PCOL col1, PTDB tdbp)
colp->Next = this;
} // endelse
}
+
} // end of COLBLK copy constructor
/***********************************************************************/
diff --git a/storage/connect/colblk.h b/storage/connect/colblk.h
index b22933d9ebb..51ab32cfae2 100644
--- a/storage/connect/colblk.h
+++ b/storage/connect/colblk.h
@@ -62,7 +62,7 @@ class DllExport COLBLK : public XOBJECT {
bool IsVirtual(void) {return Cdp->IsVirtual();}
bool IsNullable(void) {return Nullable;}
void SetNullable(bool b) {Nullable = b;}
-
+ void SetName(PSZ name_var) { Name= name_var; }
// Methods
virtual void Reset(void);
virtual bool Compare(PXOB xp);
diff --git a/storage/connect/connect.cc b/storage/connect/connect.cc
index 3b58e8b5a8f..ee62e0cd03e 100644
--- a/storage/connect/connect.cc
+++ b/storage/connect/connect.cc
@@ -73,8 +73,7 @@ PGLOBAL CntExit(PGLOBAL g)
g->Activityp = NULL;
} // endif Activityp
- PlugExit(g);
- g= NULL;
+ g= PlugExit(g);
} // endif g
return g;
@@ -295,9 +294,9 @@ bool CntOpenTable(PGLOBAL g, PTDB tdbp, MODE mode, char *c1, char *c2,
/* its column blocks in mode write (required by XML tables). */
/*******************************************************************/
if (mode == MODE_UPDATE) {
- PTDBASE utp;
+ PTDB utp;
- if (!(utp = (PTDBASE)tdbp->Duplicate(g))) {
+ if (!(utp = tdbp->Duplicate(g))) {
sprintf(g->Message, MSG(INV_UPDT_TABLE), tdbp->GetName());
throw 4;
} // endif tp
@@ -592,7 +591,7 @@ int CntCloseTable(PGLOBAL g, PTDB tdbp, bool nox, bool abort)
if (!tdbp->IsRemote()) {
// Make all the eventual indexes
- PTDBDOS tbxp = (PTDBDOS)tdbp;
+ PTDBASE tbxp = (PTDBASE)tdbp;
tbxp->ResetKindex(g, NULL);
tbxp->SetKey_Col(NULL);
rc = tbxp->ResetTableOpt(g, true, tbxp->GetDef()->Indexable() == 1);
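+
+// Editorial note: this hunk pairs with the global.h change later in the patch,
+// where PlugExit now returns a PGLOBAL instead of an int so the caller can
+// clear its handle in one statement. A sketch of the resulting calling
+// pattern, assuming PlugExit returns NULL once the work area is freed
+// (plug_session and worksize are illustrative names, not part of the patch):
+//
+//   #include "global.h"                     // PGLOBAL, PlugInit, PlugExit
+//
+//   void plug_session(size_t worksize)      // worksize: caller-chosen area size
+//   {
+//     PGLOBAL g = PlugInit(NULL, worksize); // allocate and set up the work area
+//
+//     if (!g)
+//       return;
+//
+//     /* ... sub-allocate from g and do the actual work ... */
+//
+//     g = PlugExit(g);                      // frees the area and yields NULL,
+//   } // end of plug_session                // so the handle cannot dangle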
diff --git a/storage/connect/filamap.cpp b/storage/connect/filamap.cpp
index 53150f9d8ae..f50290119ae 100644
--- a/storage/connect/filamap.cpp
+++ b/storage/connect/filamap.cpp
@@ -5,7 +5,7 @@
/* */
/* COPYRIGHT: */
/* ---------- */
-/* (C) Copyright to the author Olivier BERTRAND 2005-2017 */
+/* (C) Copyright to the author Olivier BERTRAND 2005-2020 */
/* */
/* WHAT THIS PROGRAM DOES: */
/* ----------------------- */
@@ -102,7 +102,7 @@ int MAPFAM::GetFileLength(PGLOBAL g)
bool MAPFAM::OpenTableFile(PGLOBAL g)
{
char filename[_MAX_PATH];
- int len;
+ size_t len;
MODE mode = Tdbp->GetMode();
PFBLOCK fp;
PDBUSER dbuserp = (PDBUSER)g->Activityp->Aptr;
@@ -170,13 +170,18 @@ bool MAPFAM::OpenTableFile(PGLOBAL g)
htrc("CreateFileMap: %s\n", g->Message);
return (mode == MODE_READ && rc == ENOENT)
- ? PushWarning(g, Tdbp) : true;
+ ? false : true;
+// ? PushWarning(g, Tdbp) : true; --> assert fails in MariaDB
} // endif hFile
/*******************************************************************/
- /* Get the file size (assuming file is smaller than 4 GB) */
+ /* Get the file size. */
/*******************************************************************/
- len = mm.lenL;
+ len = (size_t)mm.lenL;
+
+ if (mm.lenH)
+ len += ((size_t)mm.lenH * 0x100000000LL);
+
Memory = (char *)mm.memory;
if (!len) { // Empty or deleted file
diff --git a/storage/connect/filamtxt.cpp b/storage/connect/filamtxt.cpp
index 67ab120c499..84eab272cc5 100644
--- a/storage/connect/filamtxt.cpp
+++ b/storage/connect/filamtxt.cpp
@@ -1,11 +1,11 @@
/*********** File AM Txt C++ Program Source Code File (.CPP) ***********/
/* PROGRAM NAME: FILAMTXT */
/* ------------- */
-/* Version 1.7 */
+/* Version 1.8 */
/* */
/* COPYRIGHT: */
/* ---------- */
-/* (C) Copyright to the author Olivier BERTRAND 2005-2017 */
+/* (C) Copyright to the author Olivier BERTRAND 2005-2020 */
/* */
/* WHAT THIS PROGRAM DOES: */
/* ----------------------- */
@@ -48,6 +48,7 @@
#include "plgdbsem.h"
#include "filamtxt.h"
#include "tabdos.h"
+#include "tabjson.h"
#if defined(UNIX) || defined(UNIV_LINUX)
#include "osutil.h"
@@ -804,14 +805,14 @@ int DOSFAM::ReadBuffer(PGLOBAL g)
Placed = false;
if (trace(2))
- htrc(" About to read: stream=%p To_Buf=%p Buflen=%d\n",
- Stream, To_Buf, Buflen);
+ htrc(" About to read: stream=%p To_Buf=%p Buflen=%d Fpos=%d\n",
+ Stream, To_Buf, Buflen, Fpos);
if (fgets(To_Buf, Buflen, Stream)) {
p = To_Buf + strlen(To_Buf) - 1;
if (trace(2))
- htrc(" Read: To_Buf=%p p=%c\n", To_Buf, To_Buf, p);
+ htrc(" Read: To_Buf=%p p=%c\n", To_Buf, p);
#if defined(__WIN__)
if (Bin) {
@@ -1663,3 +1664,456 @@ void BLKFAM::Rewind(void)
//Rbuf = 0; commented out in case we reuse last read block
} // end of Rewind
+/* --------------------------- Class BINFAM -------------------------- */
+
+#if 0
+/***********************************************************************/
+/* BIN GetFileLength: returns file size in number of bytes. */
+/***********************************************************************/
+int BINFAM::GetFileLength(PGLOBAL g)
+{
+ int len;
+
+ if (!Stream)
+ len = TXTFAM::GetFileLength(g);
+ else
+ if ((len = _filelength(_fileno(Stream))) < 0)
+ sprintf(g->Message, MSG(FILELEN_ERROR), "_filelength", To_File);
+
+ xtrc(1, "File length=%d\n", len);
+ return len;
+} // end of GetFileLength
+
+/***********************************************************************/
+/* Cardinality: returns table cardinality in number of rows. */
+/* This function can be called with a null argument to test the */
+/* availability of Cardinality implementation (1 yes, 0 no). */
+/***********************************************************************/
+int BINFAM::Cardinality(PGLOBAL g)
+{
+ return (g) ? -1 : 0;
+} // end of Cardinality
+
+/***********************************************************************/
+/* OpenTableFile: Open a DOS/UNIX table file using C standard I/Os. */
+/***********************************************************************/
+bool BINFAM::OpenTableFile(PGLOBAL g) {
+ char opmode[4], filename[_MAX_PATH];
+ MODE mode = Tdbp->GetMode();
+ PDBUSER dbuserp = PlgGetUser(g);
+
+ switch (mode) {
+ case MODE_READ:
+ strcpy(opmode, "rb");
+ break;
+ case MODE_WRITE:
+ strcpy(opmode, "wb");
+ break;
+ default:
+ sprintf(g->Message, MSG(BAD_OPEN_MODE), mode);
+ return true;
+ } // endswitch Mode
+
+ // Now open the file stream
+ PlugSetPath(filename, To_File, Tdbp->GetPath());
+
+ if (!(Stream = PlugOpenFile(g, filename, opmode))) {
+ if (trace(1))
+ htrc("%s\n", g->Message);
+
+ return (mode == MODE_READ && errno == ENOENT)
+ ? PushWarning(g, Tdbp) : true;
+ } // endif Stream
+
+ if (trace(1))
+ htrc("File %s open Stream=%p mode=%s\n", filename, Stream, opmode);
+
+ To_Fb = dbuserp->Openlist; // Keep track of File block
+
+ /*********************************************************************/
+ /* Allocate the line buffer. */
+ /*********************************************************************/
+ return AllocateBuffer(g);
+} // end of OpenTableFile
+#endif // 0
+
+/***********************************************************************/
+/* Allocate the line buffer. For mode Delete a bigger buffer has to */
+/* be allocated because it is also used to move lines into the file. */
+/***********************************************************************/
+bool BINFAM::AllocateBuffer(PGLOBAL g)
+{
+ MODE mode = Tdbp->GetMode();
+
+ // Lrecl is Ok
+ Buflen = Lrecl;
+
+ // Buffer will be allocated separately
+ if (mode == MODE_ANY) {
+ xtrc(1, "SubAllocating a buffer of %d bytes\n", Buflen);
+ To_Buf = (char*)PlugSubAlloc(g, NULL, Buflen);
+ } else if (UseTemp || mode == MODE_DELETE) {
+ // Have a big buffer to move lines
+ Dbflen = Buflen * DOS_BUFF_LEN;
+ DelBuf = PlugSubAlloc(g, NULL, Dbflen);
+ } // endif mode
+
+ return false;
+#if 0
+ MODE mode = Tdbp->GetMode();
+
+ // Lrecl is Ok
+ Dbflen = Buflen = Lrecl;
+
+ if (trace(1))
+ htrc("SubAllocating a buffer of %d bytes\n", Buflen);
+
+ DelBuf = To_Buf = (char*)PlugSubAlloc(g, NULL, Buflen);
+ return false;
+#endif // 0
+} // end of AllocateBuffer
+
+#if 0
+/***********************************************************************/
+/* GetRowID: return the RowID of last read record. */
+/***********************************************************************/
+int BINFAM::GetRowID(void) {
+ return Rows;
+} // end of GetRowID
+
+/***********************************************************************/
+/* GetPos: return the position of last read record. */
+/***********************************************************************/
+int BINFAM::GetPos(void) {
+ return Fpos;
+} // end of GetPos
+
+/***********************************************************************/
+/* GetNextPos: return the position of next record. */
+/***********************************************************************/
+int BINFAM::GetNextPos(void) {
+ return ftell(Stream);
+} // end of GetNextPos
+
+/***********************************************************************/
+/* SetPos: Replace the table at the specified position. */
+/***********************************************************************/
+bool BINFAM::SetPos(PGLOBAL g, int pos) {
+ Fpos = pos;
+
+ if (fseek(Stream, Fpos, SEEK_SET)) {
+ sprintf(g->Message, MSG(FSETPOS_ERROR), Fpos);
+ return true;
+ } // endif
+
+ Placed = true;
+ return false;
+} // end of SetPos
+
+/***********************************************************************/
+/* Record file position in case of UPDATE or DELETE. */
+/***********************************************************************/
+bool BINFAM::RecordPos(PGLOBAL g) {
+ if ((Fpos = ftell(Stream)) < 0) {
+ sprintf(g->Message, MSG(FTELL_ERROR), 0, strerror(errno));
+ // strcat(g->Message, " (possible wrong ENDING option value)");
+ return true;
+ } // endif Fpos
+
+ return false;
+} // end of RecordPos
+#endif // 0
+
+/***********************************************************************/
+/* ReadBuffer: Read one line for a text file. */
+/***********************************************************************/
+int BINFAM::ReadBuffer(PGLOBAL g)
+{
+ int rc;
+
+ if (!Stream)
+ return RC_EF;
+
+ xtrc(2, "ReadBuffer: Tdbp=%p To_Line=%p Placed=%d\n",
+ Tdbp, Tdbp->GetLine(), Placed);
+
+ if (!Placed) {
+ /*******************************************************************/
+ /* Record file position in case of UPDATE or DELETE. */
+ /*******************************************************************/
+ if (RecordPos(g))
+ return RC_FX;
+
+ CurBlk = (int)Rows++;
+ xtrc(2, "ReadBuffer: CurBlk=%d\n", CurBlk);
+ } else
+ Placed = false;
+
+ xtrc(2, " About to read: bstream=%p To_Buf=%p Buflen=%d Fpos=%d\n",
+ Stream, To_Buf, Buflen, Fpos);
+
+ // Read the prefix giving the row length
+ if (!fread(&Recsize, sizeof(size_t), 1, Stream)) {
+ if (!feof(Stream)) {
+ strcpy(g->Message, "Error reading line prefix\n");
+ return RC_FX;
+ } else
+ return RC_EF;
+
+ } else if (Recsize > (unsigned)Buflen) {
+ sprintf(g->Message, "Record too big (Recsize=%zd Buflen=%d)\n", Recsize, Buflen);
+ return RC_FX;
+ } // endif Recsize
+
+ if (fread(To_Buf, Recsize, 1, Stream)) {
+ xtrc(2, " Read: To_Buf=%p Recsize=%zd\n", To_Buf, Recsize);
+ num_read++;
+ rc = RC_OK;
+ } else if (feof(Stream)) {
+ rc = RC_EF;
+ } else {
+#if defined(__WIN__)
+ sprintf(g->Message, MSG(READ_ERROR), To_File, _strerror(NULL));
+#else
+ sprintf(g->Message, MSG(READ_ERROR), To_File, strerror(errno));
+#endif
+ xtrc(2, "%s\n", g->Message);
+ rc = RC_FX;
+ } // endif's fread
+
+ xtrc(2, "ReadBuffer: rc=%d\n", rc);
+ IsRead = true;
+ return rc;
+} // end of ReadBuffer
+
+/***********************************************************************/
+/* WriteBuffer: File write routine for BIN access method. */
+/***********************************************************************/
+int BINFAM::WriteBuffer(PGLOBAL g)
+{
+ int curpos = 0;
+ bool moved = true;
+
+ // T_Stream is the temporary stream or the table file stream itself
+ if (!T_Stream) {
+ if (UseTemp && Tdbp->GetMode() == MODE_UPDATE) {
+ if (OpenTempFile(g))
+ return RC_FX;
+
+ } else
+ T_Stream = Stream;
+
+ } // endif T_Stream
+
+ if (Tdbp->GetMode() == MODE_UPDATE) {
+ /*******************************************************************/
+ /* Here we simply rewrite a record on itself. There are two cases */
+ /* where another method should be used, a/ when Update applies to */
+ /* the whole file, b/ when updating the last field of a variable */
+ /* length file. The method could be to rewrite a new file, then */
+ /* to erase the old one and rename the new updated file. */
+ /*******************************************************************/
+ curpos = ftell(Stream);
+
+ if (trace(1))
+ htrc("Last : %d cur: %d\n", Fpos, curpos);
+
+ if (UseTemp) {
+ /*****************************************************************/
+ /* We are using a temporary file. */
+ /* Before writing the updated record, we may first have to copy */
+ /* all the intermediate records that have not been updated. */
+ /*****************************************************************/
+ if (MoveIntermediateLines(g, &moved))
+ return RC_FX;
+
+ Spos = curpos; // New start position
+ } else
+ // Update is directly written back into the file,
+ // with this (fast) method, record size cannot change.
+ if (fseek(Stream, Fpos, SEEK_SET)) {
+ sprintf(g->Message, MSG(FSETPOS_ERROR), 0);
+ return RC_FX;
+ } // endif
+
+ } // endif mode
+
+ /*********************************************************************/
+ /* Prepare writing the line. */
+ /*********************************************************************/
+//memcpy(To_Buf, Tdbp->GetLine(), Recsize);
+
+ /*********************************************************************/
+ /* Now start the writing process. */
+ /*********************************************************************/
+ if (fwrite(&Recsize, sizeof(size_t), 1, T_Stream) != 1) {
+ sprintf(g->Message, "Error %d writing prefix to %s",
+ errno, To_File);
+ return RC_FX;
+ } else if (fwrite(To_Buf, Recsize, 1, T_Stream) != 1) {
+ sprintf(g->Message, "Error %d writing %zd bytes to %s",
+ errno, Recsize, To_File);
+ return RC_FX;
+ } // endif fwrite
+
+ if (Tdbp->GetMode() == MODE_UPDATE && moved)
+ if (fseek(Stream, curpos, SEEK_SET)) {
+ sprintf(g->Message, MSG(FSEEK_ERROR), strerror(errno));
+ return RC_FX;
+ } // endif
+
+ xtrc(1, "Binary write done\n");
+ return RC_OK;
+} // end of WriteBuffer
+
+#if 0
+/***********************************************************************/
+/* Data Base delete line routine for DOS and BLK access methods. */
+/***********************************************************************/
+int DOSFAM::DeleteRecords(PGLOBAL g, int irc)
+{
+ bool moved;
+ int curpos = ftell(Stream);
+
+ /*********************************************************************/
+ /* There is an alternative here: */
+ /* 1 - use a temporary file in which are copied all not deleted */
+ /* lines, at the end the original file will be deleted and */
+ /* the temporary file renamed to the original file name. */
+ /* 2 - directly move the not deleted lines inside the original */
+ /* file, and at the end erase all trailing records. */
+ /* This will be experimented. */
+ /*********************************************************************/
+ if (trace(1))
+ htrc(
+ "DOS DeleteDB: rc=%d UseTemp=%d curpos=%d Fpos=%d Tpos=%d Spos=%d\n",
+ irc, UseTemp, curpos, Fpos, Tpos, Spos);
+
+ if (irc != RC_OK) {
+ /*******************************************************************/
+ /* EOF: position Fpos at the end-of-file position. */
+ /*******************************************************************/
+ fseek(Stream, 0, SEEK_END);
+ Fpos = ftell(Stream);
+
+ if (trace(1))
+ htrc("Fpos placed at file end=%d\n", Fpos);
+
+ } // endif irc
+
+ if (Tpos == Spos) {
+ /*******************************************************************/
+ /* First line to delete, Open temporary file. */
+ /*******************************************************************/
+ if (UseTemp) {
+ if (OpenTempFile(g))
+ return RC_FX;
+
+ } else {
+ /*****************************************************************/
+ /* Move of eventual preceding lines is not required here. */
+ /* Set the target file as being the source file itself. */
+ /* Set the future Tpos, and give Spos a value to block copying. */
+ /*****************************************************************/
+ T_Stream = Stream;
+ Spos = Tpos = Fpos;
+ } // endif UseTemp
+
+ } // endif Tpos == Spos
+
+ /*********************************************************************/
+ /* Move any intermediate lines. */
+ /*********************************************************************/
+ if (MoveIntermediateLines(g, &moved))
+ return RC_FX;
+
+ if (irc == RC_OK) {
+ /*******************************************************************/
+ /* Reposition the file pointer and set Spos. */
+ /*******************************************************************/
+ if (!UseTemp || moved)
+ if (fseek(Stream, curpos, SEEK_SET)) {
+ sprintf(g->Message, MSG(FSETPOS_ERROR), 0);
+ return RC_FX;
+ } // endif
+
+ Spos = GetNextPos(); // New start position
+
+ if (trace(1))
+ htrc("after: Tpos=%d Spos=%d\n", Tpos, Spos);
+
+ } else {
+ /*******************************************************************/
+ /* Last call after EOF has been reached. */
+ /* The UseTemp case is treated in CloseTableFile. */
+ /*******************************************************************/
+ if (!UseTemp & !Abort) {
+ /*****************************************************************/
+ /* Because the chsize functionality is only accessible with a */
+ /* system call we must close the file and reopen it with the */
+ /* open function (_fopen for MS ??) this is still to be checked */
+ /* for compatibility with Text files and other OS's. */
+ /*****************************************************************/
+ char filename[_MAX_PATH];
+ int h; // File handle, return code
+
+ PlugSetPath(filename, To_File, Tdbp->GetPath());
+ /*rc=*/ PlugCloseFile(g, To_Fb);
+
+ if ((h= global_open(g, MSGID_OPEN_STRERROR, filename, O_WRONLY)) <= 0)
+ return RC_FX;
+
+ /*****************************************************************/
+ /* Remove extra records. */
+ /*****************************************************************/
+#if defined(__WIN__)
+ if (chsize(h, Tpos)) {
+ sprintf(g->Message, MSG(CHSIZE_ERROR), strerror(errno));
+ close(h);
+ return RC_FX;
+ } // endif
+#else
+ if (ftruncate(h, (off_t)Tpos)) {
+ sprintf(g->Message, MSG(TRUNCATE_ERROR), strerror(errno));
+ close(h);
+ return RC_FX;
+ } // endif
+#endif
+
+ close(h);
+
+ if (trace(1))
+ htrc("done, h=%d irc=%d\n", h, irc);
+
+ } // endif !UseTemp
+
+ } // endif irc
+
+ return RC_OK; // All is correct
+} // end of DeleteRecords
+
+/***********************************************************************/
+/* Table file close routine for DOS access method. */
+/***********************************************************************/
+void BINFAM::CloseTableFile(PGLOBAL g, bool abort)
+{
+ int rc;
+
+ Abort = abort;
+ rc = PlugCloseFile(g, To_Fb);
+ xtrc(1, "BIN Close: closing %s rc=%d\n", To_File, rc);
+ Stream = NULL; // So we can know whether table is open
+} // end of CloseTableFile
+
+/***********************************************************************/
+/* Rewind routine for BIN access method. */
+/***********************************************************************/
+void BINFAM::Rewind(void)
+{
+ if (Stream) // Can be NULL when making index on void table
+ rewind(Stream);
+
+ Rows = 0;
+ OldBlk = CurBlk = -1;
+} // end of Rewind
+#endif // 0
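+
+// Editorial note: the live part of the BINFAM code above (AllocateBuffer,
+// ReadBuffer, WriteBuffer) defines a simple variable-length binary record
+// format: each record is a size_t length prefix immediately followed by that
+// many bytes of data. A stand-alone sketch of a reader for that framing
+// (scan_bin_records is an illustrative helper, not part of the patch; BINFAM
+// itself works through its Stream/To_Buf members and the RC_* codes above):
+//
+//   #include <cstdio>                       // FILE, fread, feof
+//
+//   // Walk a file of length-prefixed records, as BINFAM::ReadBuffer does.
+//   // Returns false on a clean end of file, true on a malformed file.
+//   static bool scan_bin_records(FILE *f, char *buf, size_t buflen)
+//   {
+//     size_t recsize;
+//
+//     while (fread(&recsize, sizeof(size_t), 1, f) == 1) {
+//       if (recsize > buflen)
+//         return true;                      // record larger than the buffer
+//
+//       if (recsize && fread(buf, recsize, 1, f) != 1)
+//         return true;                      // truncated record body
+//
+//       // buf[0 .. recsize-1] now holds one record
+//     } // endwhile
+//
+//     return !feof(f);                      // stopped before EOF => read error
+//   } // end of scan_bin_records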
diff --git a/storage/connect/filamtxt.h b/storage/connect/filamtxt.h
index 1fdae8fcd37..353e06ad3bd 100644
--- a/storage/connect/filamtxt.h
+++ b/storage/connect/filamtxt.h
@@ -1,7 +1,7 @@
/************** FilAMTxt H Declares Source Code File (.H) **************/
-/* Name: FILAMTXT.H Version 1.3 */
+/* Name: FILAMTXT.H Version 1.4 */
/* */
-/* (C) Copyright to the author Olivier BERTRAND 2005-2014 */
+/* (C) Copyright to the author Olivier BERTRAND 2005-2020 */
/* */
/* This file contains the file access method classes declares. */
/***********************************************************************/
@@ -15,6 +15,7 @@
typedef class TXTFAM *PTXF;
typedef class DOSFAM *PDOSFAM;
typedef class BLKFAM *PBLKFAM;
+typedef class BINFAM *PBINFAM;
typedef class DOSDEF *PDOSDEF;
typedef class TDBDOS *PTDBDOS;
@@ -210,4 +211,44 @@ class DllExport BLKFAM : public DOSFAM {
bool Closing; // True when closing on Update
}; // end of class BLKFAM
+/***********************************************************************/
+/* This is the DOS/UNIX Access Method class declaration for binary */
+/* files with variable record format (BJSON) */
+/***********************************************************************/
+class DllExport BINFAM : public DOSFAM {
+public:
+ // Constructor
+ BINFAM(PDOSDEF tdp) : DOSFAM(tdp) {Recsize = 0;}
+ BINFAM(PBINFAM txfp) : DOSFAM(txfp) {Recsize = txfp->Recsize;}
+
+ // Implementation
+ virtual AMT GetAmType(void) {return TYPE_AM_BIN;}
+//virtual int GetPos(void);
+//virtual int GetNextPos(void);
+ virtual PTXF Duplicate(PGLOBAL g) { return (PTXF)new(g) BINFAM(this); }
+
+ // Methods
+//virtual void Reset(void) {TXTFAM::Reset();}
+//virtual int GetFileLength(PGLOBAL g);
+//virtual int Cardinality(PGLOBAL g);
+ virtual int MaxBlkSize(PGLOBAL g, int s) {return s;}
+ virtual bool AllocateBuffer(PGLOBAL g);
+//virtual int GetRowID(void);
+//virtual bool RecordPos(PGLOBAL g);
+//virtual bool SetPos(PGLOBAL g, int recpos);
+ virtual int SkipRecord(PGLOBAL g, bool header) {return RC_OK;}
+//virtual bool OpenTableFile(PGLOBAL g);
+ virtual int ReadBuffer(PGLOBAL g);
+ virtual int WriteBuffer(PGLOBAL g);
+//virtual int DeleteRecords(PGLOBAL g, int irc);
+//virtual void CloseTableFile(PGLOBAL g, bool abort);
+//virtual void Rewind(void);
+
+//protected:
+//virtual int InitDelete(PGLOBAL g, int fpos, int spos);
+
+ // Members
+ size_t Recsize; // Length of last read or next written record
+}; // end of class BINFAM
+
#endif // __FILAMTXT_H
diff --git a/storage/connect/filamvct.cpp b/storage/connect/filamvct.cpp
index 49283f8c0c7..97f29dddc7e 100644
--- a/storage/connect/filamvct.cpp
+++ b/storage/connect/filamvct.cpp
@@ -5,7 +5,7 @@
/* */
/* COPYRIGHT: */
/* ---------- */
-/* (C) Copyright to the author Olivier BERTRAND 2005-2017 */
+/* (C) Copyright to the author Olivier BERTRAND 2005-2020 */
/* */
/* WHAT THIS PROGRAM DOES: */
/* ----------------------- */
@@ -1328,7 +1328,7 @@ VCMFAM::VCMFAM(PVCMFAM txfp) : VCTFAM(txfp)
bool VCMFAM::OpenTableFile(PGLOBAL g)
{
char filename[_MAX_PATH];
- int len;
+ size_t len;
MODE mode = Tdbp->GetMode();
PFBLOCK fp = NULL;
PDBUSER dbuserp = (PDBUSER)g->Activityp->Aptr;
@@ -1422,10 +1422,14 @@ bool VCMFAM::OpenTableFile(PGLOBAL g)
} // endif hFile
/*******************************************************************/
- /* Get the file size (assuming file is smaller than 4 GB) */
+ /* Get the file size. */
/*******************************************************************/
- len = mm.lenL;
- Memory = (char *)mm.memory;
+ len = (size_t)mm.lenL;
+
+ if (mm.lenH)
+ len += ((size_t)mm.lenH * 0x100000000LL);
+
+ Memory = (char *)mm.memory;
if (!len) { // Empty or deleted file
CloseFileHandle(hFile);
@@ -2763,7 +2767,7 @@ bool VMPFAM::OpenTableFile(PGLOBAL g)
bool VMPFAM::MapColumnFile(PGLOBAL g, MODE mode, int i)
{
char filename[_MAX_PATH];
- int len;
+ size_t len;
HANDLE hFile;
MEMMAP mm;
PFBLOCK fp;
@@ -2817,8 +2821,12 @@ bool VMPFAM::MapColumnFile(PGLOBAL g, MODE mode, int i)
/*****************************************************************/
/* Get the file size (assuming file is smaller than 4 GB) */
/*****************************************************************/
- len = mm.lenL;
- Memcol[i] = (char *)mm.memory;
+ len = (size_t)mm.lenL;
+
+ if (mm.lenH)
+ len += ((size_t)mm.lenH * 0x100000000LL);
+
+ Memcol[i] = (char *)mm.memory;
if (!len) { // Empty or deleted file
CloseFileHandle(hFile);
@@ -4110,7 +4118,8 @@ bool BGVFAM::CleanUnusedSpace(PGLOBAL g)
} else {
int req;
- memset(To_Buf, 0, Buflen);
+ if (To_Buf)
+ memset(To_Buf, 0, Buflen);
for (n = Fpos - Tpos; n > 0; n -= req) {
/*****************************************************************/
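+
+// Editorial note: the two mapped-file hunks above (and the matching one in
+// filamap.cpp) widen len to size_t so that files larger than 4 GB can be
+// mapped: the 64-bit length is rebuilt from the two 32-bit halves reported in
+// the MEMMAP block. A minimal sketch of that combination, assuming lenL and
+// lenH are the low and high DWORDs of the mapping size (map_length is an
+// illustrative helper, not part of the patch):
+//
+//   #include <cstdint>
+//
+//   // Rebuild a 64-bit mapping length from its low and high 32-bit halves.
+//   static inline uint64_t map_length(uint32_t lenL, uint32_t lenH)
+//   {
+//     return (uint64_t)lenL | ((uint64_t)lenH << 32);  // lenH counts 4 GB units
+//   } // end of map_length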
diff --git a/storage/connect/filamzip.cpp b/storage/connect/filamzip.cpp
index eb14e846120..79599382693 100644
--- a/storage/connect/filamzip.cpp
+++ b/storage/connect/filamzip.cpp
@@ -154,10 +154,10 @@ static bool ZipFiles(PGLOBAL g, ZIPUTIL *zutp, PCSZ pat, char *buf)
strcpy(filename, pat);
#if defined(__WIN__)
+ int rc;
char drive[_MAX_DRIVE], direc[_MAX_DIR];
WIN32_FIND_DATA FileData;
HANDLE hSearch;
- int rc;
_splitpath(filename, drive, direc, NULL, NULL);
@@ -1207,7 +1207,7 @@ int UZDFAM::Cardinality(PGLOBAL g)
return 1;
int card = -1;
- int len = GetFileLength(g);
+ GetFileLength(g);
card = Records;
diff --git a/storage/connect/global.h b/storage/connect/global.h
index d17620861fa..8774285e54b 100644
--- a/storage/connect/global.h
+++ b/storage/connect/global.h
@@ -185,7 +185,7 @@ typedef struct _global { /* Global structure */
size_t Sarea_Size; /* Work area size */
PACTIVITY Activityp;
char Message[MAX_STR]; /* Message (result, error, trace) */
- ulong More; /* Used by jsonudf */
+ size_t More; /* Used by jsonudf */
size_t Saved_Size; /* Saved work area to_free */
bool Createas; /* To pass multi to ext tables */
void *Xchk; /* indexes in create/alter */
@@ -208,7 +208,7 @@ DllExport char *PlugGetMessage(PGLOBAL, int);
DllExport short GetLineLength(PGLOBAL); // Console line length
#endif // __WIN__
DllExport PGLOBAL PlugInit(LPCSTR, size_t); // Plug global initialization
-DllExport int PlugExit(PGLOBAL); // Plug global termination
+DllExport PGLOBAL PlugExit(PGLOBAL); // Plug global termination
DllExport LPSTR PlugRemoveType(LPSTR, LPCSTR);
DllExport LPCSTR PlugSetPath(LPSTR to, LPCSTR prefix, LPCSTR name, LPCSTR dir);
DllExport BOOL PlugIsAbsolutePath(LPCSTR path);
@@ -220,30 +220,11 @@ DllExport char *PlugDup(PGLOBAL g, const char *str);
DllExport void htrc(char const *fmt, ...);
DllExport void xtrc(uint, char const* fmt, ...);
DllExport uint GetTraceValue(void);
+DllExport void* MakePtr(void* memp, size_t offset);
+DllExport size_t MakeOff(void* memp, void* ptr);
#if defined(__cplusplus)
} // extern "C"
#endif
-/***********************************************************************/
-/* Inline routine definitions. */
-/***********************************************************************/
-/***********************************************************************/
-/* This routine makes a pointer from an offset to a memory pointer. */
-/***********************************************************************/
-inline void* MakePtr(void* memp, size_t offset) {
- // return ((offset == 0) ? NULL : &((char*)memp)[offset]);
- return (!offset) ? NULL : (char *)memp + offset;
-} /* end of MakePtr */
-
-/***********************************************************************/
-/* This routine makes an offset from a pointer new format. */
-/***********************************************************************/
-inline size_t MakeOff(void* memp, void* ptr) {
-#if defined(_DEBUG)
- assert(ptr > memp);
-#endif // _DEBUG
- return ((!ptr) ? 0 : (size_t)((char*)ptr - (size_t)memp));
-} /* end of MakeOff */
-
/*-------------------------- End of Global.H --------------------------*/
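+
+// Editorial note: MakePtr and MakeOff convert between absolute pointers and
+// offsets relative to the base of the Plug work area, which lets sub-allocated
+// structures be stored position-independently; the patch turns them from
+// inline helpers into exported functions. A sketch of the intended behaviour,
+// matching the inline versions removed above (offset 0 stands for a null
+// pointer in both directions):
+//
+//   void *MakePtr(void *memp, size_t offset)
+//   {
+//     return offset ? (char *)memp + offset : NULL;
+//   } // end of MakePtr
+//
+//   size_t MakeOff(void *memp, void *ptr)
+//   {
+//     return ptr ? (size_t)((char *)ptr - (char *)memp) : 0;
+//   } // end of MakeOff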
diff --git a/storage/connect/ha_connect.cc b/storage/connect/ha_connect.cc
index f8bf8804246..0be262f6a63 100644
--- a/storage/connect/ha_connect.cc
+++ b/storage/connect/ha_connect.cc
@@ -170,7 +170,7 @@
#define JSONMAX 10 // JSON Default max grp size
extern "C" {
- char version[]= "Version 1.07.0002 October 18, 2020";
+ char version[]= "Version 1.07.0002 January 27, 2021";
#if defined(__WIN__)
char compver[]= "Version 1.07.0002 " __DATE__ " " __TIME__;
char slash= '\\';
@@ -230,6 +230,9 @@ char *GetUserVariable(PGLOBAL g, const uchar *varname)
PQRYRES OEMColumns(PGLOBAL g, PTOS topt, char *tab, char *db, bool info);
PQRYRES VirColumns(PGLOBAL g, bool info);
PQRYRES JSONColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt, bool info);
+#ifdef BSON_SUPPORT
+PQRYRES BSONColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt, bool info);
+#endif // BSON_SUPPORT
PQRYRES XMLColumns(PGLOBAL g, char *db, char *tab, PTOS topt, bool info);
#if defined(REST_SUPPORT)
PQRYRES RESTColumns(PGLOBAL g, PTOS topt, char *tab, char *db, bool info);
@@ -251,11 +254,15 @@ bool ExactInfo(void);
USETEMP UseTemp(void);
int GetConvSize(void);
TYPCONV GetTypeConv(void);
+int GetDefaultDepth(void);
+int GetDefaultPrec(void);
bool JsonAllPath(void);
char *GetJsonNull(void);
-int GetDefaultDepth(void);
uint GetJsonGrpSize(void);
char *GetJavaWrapper(void);
+#if defined(BSON_SUPPORT)
+bool Force_Bson(void);
+#endif // BSON_SUPPORT
size_t GetWorkSize(void);
void SetWorkSize(size_t);
extern "C" const char *msglang(void);
@@ -279,7 +286,12 @@ static char *strz(PGLOBAL g, LEX_CSTRING &ls)
{
char *str= (char*)PlugSubAlloc(g, NULL, ls.length + 1);
- memcpy(str, ls.str, ls.length);
+ /*
+ ls.str can be NULL, for example when called with
+ create_info->connect_string
+ */
+ if (ls.str)
+ memcpy(str, ls.str, ls.length);
str[ls.length]= 0;
return str;
} // end of strz
@@ -397,7 +409,7 @@ static MYSQL_THDVAR_ENUM(
// Adding JPATH to all Json table columns
static MYSQL_THDVAR_BOOL(json_all_path, PLUGIN_VAR_RQCMDARG,
"Adding JPATH to all Json table columns",
- NULL, NULL, 0); // NO by default
+ NULL, NULL, 1); // YES by default
// Null representation for JSON values
static MYSQL_THDVAR_STR(json_null,
@@ -410,11 +422,17 @@ static MYSQL_THDVAR_STR(json_null,
static MYSQL_THDVAR_INT(default_depth,
PLUGIN_VAR_RQCMDARG,
"Default depth used by Json, XML and Mongo discovery",
- NULL, NULL, 0, -1, 16, 1);
+ NULL, NULL, 5, -1, 16, 1); // Defaults to 5
+
+// Default precision for doubles
+static MYSQL_THDVAR_INT(default_prec,
+ PLUGIN_VAR_RQCMDARG,
+ "Default precision used for doubles",
+ NULL, NULL, 6, 0, 16, 1); // Defaults to 6
// Estimate max number of rows for JSON aggregate functions
static MYSQL_THDVAR_UINT(json_grp_size,
- PLUGIN_VAR_RQCMDARG, // opt
+ PLUGIN_VAR_RQCMDARG, // opt
"max number of rows for JSON aggregate functions.",
NULL, NULL, JSONMAX, 1, INT_MAX, 1);
@@ -439,6 +457,13 @@ static MYSQL_THDVAR_BOOL(enable_mongo, PLUGIN_VAR_RQCMDARG,
#endif // !version 2,3
#endif // JAVA_SUPPORT || CMGO_SUPPORT
+#if defined(BSON_SUPPORT)
+// Force using BSON for JSON tables
+static MYSQL_THDVAR_BOOL(force_bson, PLUGIN_VAR_RQCMDARG,
+ "Force using BSON for JSON tables",
+ NULL, NULL, 0); // NO by default
+#endif // BSON_SUPPORT
+
#if defined(XMSG) || defined(NEWMSG)
const char *language_names[]=
{
@@ -480,6 +505,7 @@ TYPCONV GetTypeConv(void) {return (TYPCONV)THDVAR(current_thd, type_conv);}
char *GetJsonNull(void)
{return connect_hton ? THDVAR(current_thd, json_null) : NULL;}
int GetDefaultDepth(void) {return THDVAR(current_thd, default_depth);}
+int GetDefaultPrec(void) {return THDVAR(current_thd, default_prec);}
uint GetJsonGrpSize(void)
{return connect_hton ? THDVAR(current_thd, json_grp_size) : 10;}
size_t GetWorkSize(void) {return (size_t)THDVAR(current_thd, work_size);}
@@ -501,6 +527,10 @@ char *GetJavaWrapper(void)
bool MongoEnabled(void) {return THDVAR(current_thd, enable_mongo);}
#endif // JAVA_SUPPORT || CMGO_SUPPORT
+#if defined(BSON_SUPPORT)
+bool Force_Bson(void) {return THDVAR(current_thd, force_bson);}
+#endif // BSON_SUPPORT
+
#if defined(XMSG) || defined(NEWMSG)
extern "C" const char *msglang(void)
{return language_names[THDVAR(current_thd, msg_lang)];}
@@ -1051,12 +1081,12 @@ static PGLOBAL GetPlug(THD *thd, PCONNECT& lxp)
/****************************************************************************/
TABTYPE ha_connect::GetRealType(PTOS pos)
{
- TABTYPE type;
+ TABTYPE type= TAB_UNDEF;
if (pos || (pos= GetTableOptionStruct())) {
type= GetTypeID(pos->type);
- if (type == TAB_UNDEF)
+ if (type == TAB_UNDEF && !pos->http)
type= pos->srcdef ? TAB_MYSQL : pos->tabname ? TAB_PRX : TAB_DOS;
#if defined(REST_SUPPORT)
else if (pos->http)
@@ -1064,7 +1094,8 @@ TABTYPE ha_connect::GetRealType(PTOS pos)
case TAB_JSON:
case TAB_XML:
case TAB_CSV:
- type = TAB_REST;
+ case TAB_UNDEF:
+ type = TAB_REST;
break;
case TAB_REST:
type = TAB_NIY;
@@ -1074,8 +1105,7 @@ TABTYPE ha_connect::GetRealType(PTOS pos)
} // endswitch type
#endif // REST_SUPPORT
- } else
- type= TAB_UNDEF;
+ } // endif pos
return type;
} // end of GetRealType
@@ -1387,7 +1417,7 @@ PCSZ ha_connect::GetStringOption(PCSZ opname, PCSZ sdef)
PTOS options= GetTableOptionStruct();
if (!stricmp(opname, "Connect")) {
- LEX_CSTRING cnc= (tshp) ? tshp->connect_string
+ LEX_CSTRING cnc= (tshp) ? tshp->connect_string
: table->s->connect_string;
if (cnc.length)
@@ -1573,6 +1603,7 @@ void *ha_connect::GetColumnOption(PGLOBAL g, void *field, PCOLINFO pcf)
// Now get column information
pcf->Name= (char*)fp->field_name.str;
+ chset = (char*)fp->charset()->name;
if (fop && fop->special) {
pcf->Fieldfmt= (char*)fop->special;
@@ -1583,8 +1614,15 @@ void *ha_connect::GetColumnOption(PGLOBAL g, void *field, PCOLINFO pcf)
pcf->Scale= 0;
pcf->Opt= (fop) ? (int)fop->opt : 0;
- if ((pcf->Length= fp->field_length) < 0)
- pcf->Length= 256; // BLOB?
+ if (fp->field_length >= 0) {
+ pcf->Length = fp->field_length;
+
+ // length is bytes for Connect, not characters
+ if (!strnicmp(chset, "utf8", 4))
+ pcf->Length /= 3;
+
+ } else
+ pcf->Length= 256; // BLOB?
pcf->Precision= pcf->Length;
@@ -1601,8 +1639,6 @@ void *ha_connect::GetColumnOption(PGLOBAL g, void *field, PCOLINFO pcf)
pcf->Fieldfmt= NULL;
} // endif fop
- chset= (char *)fp->charset()->name;
-
if (!strcmp(chset, "binary"))
v = 'B'; // Binary string
@@ -2156,7 +2192,6 @@ int ha_connect::MakeRecord(char *buf)
int rc= 0;
Field* *field;
Field *fp;
- my_bitmap_map *org_bitmap;
CHARSET_INFO *charset= tdbp->data_charset();
//MY_BITMAP readmap;
MY_BITMAP *map;
@@ -2170,7 +2205,7 @@ int ha_connect::MakeRecord(char *buf)
*table->def_read_set.bitmap, *table->def_write_set.bitmap);
// Avoid asserts in field::store() for columns that are not updated
- org_bitmap= dbug_tmp_use_all_columns(table, table->write_set);
+ MY_BITMAP *org_bitmap= dbug_tmp_use_all_columns(table, &table->write_set);
// This is for variable_length rows
memset(buf, 0, table->s->null_bytes);
@@ -2197,7 +2232,7 @@ int ha_connect::MakeRecord(char *buf)
continue;
htrc("Column %s not found\n", fp->field_name.str);
- dbug_tmp_restore_column_map(table->write_set, org_bitmap);
+ dbug_tmp_restore_column_map(&table->write_set, org_bitmap);
DBUG_RETURN(HA_ERR_WRONG_IN_RECORD);
} // endif colp
@@ -2257,7 +2292,7 @@ int ha_connect::MakeRecord(char *buf)
sprintf(buf, "Out of range value %.140s for column '%s' at row %ld",
value->GetCharString(val),
- fp->field_name.str,
+ fp->field_name.str,
thd->get_stmt_da()->current_row_for_warning());
push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, buf);
@@ -2280,7 +2315,7 @@ int ha_connect::MakeRecord(char *buf)
memcpy(buf, table->record[0], table->s->stored_rec_length);
// This is copied from ha_tina and is necessary to avoid asserts
- dbug_tmp_restore_column_map(table->write_set, org_bitmap);
+ dbug_tmp_restore_column_map(&table->write_set, org_bitmap);
DBUG_RETURN(rc);
} // end of MakeRecord
@@ -2300,7 +2335,7 @@ int ha_connect::ScanRecord(PGLOBAL g, const uchar *)
//PTDBASE tp= (PTDBASE)tdbp;
String attribute(attr_buffer, sizeof(attr_buffer),
table->s->table_charset);
- my_bitmap_map *bmap= dbug_tmp_use_all_columns(table, table->read_set);
+ MY_BITMAP *bmap= dbug_tmp_use_all_columns(table, &table->read_set);
const CHARSET_INFO *charset= tdbp->data_charset();
String data_charset_value(data_buffer, sizeof(data_buffer), charset);
@@ -2422,7 +2457,7 @@ int ha_connect::ScanRecord(PGLOBAL g, const uchar *)
} // endfor field
err:
- dbug_tmp_restore_column_map(table->read_set, bmap);
+ dbug_tmp_restore_column_map(&table->read_set, bmap);
return rc;
} // end of ScanRecord
@@ -2470,7 +2505,7 @@ bool ha_connect::MakeKeyWhere(PGLOBAL g, PSTRG qry, OPVAL vop, char q,
OPVAL op;
Field *fp;
const key_range *ranges[2];
- my_bitmap_map *old_map;
+ MY_BITMAP *old_map;
KEY *kfp;
KEY_PART_INFO *kpart;
@@ -2487,7 +2522,7 @@ bool ha_connect::MakeKeyWhere(PGLOBAL g, PSTRG qry, OPVAL vop, char q,
both= ranges[0] && ranges[1];
kfp= &table->key_info[active_index];
- old_map= dbug_tmp_use_all_columns(table, table->write_set);
+ old_map= dbug_tmp_use_all_columns(table, &table->write_set);
for (i= 0; i <= 1; i++) {
if (ranges[i] == NULL)
@@ -2582,11 +2617,11 @@ bool ha_connect::MakeKeyWhere(PGLOBAL g, PSTRG qry, OPVAL vop, char q,
if ((oom= qry->IsTruncated()))
strcpy(g->Message, "Out of memory");
- dbug_tmp_restore_column_map(table->write_set, old_map);
+ dbug_tmp_restore_column_map(&table->write_set, old_map);
return oom;
err:
- dbug_tmp_restore_column_map(table->write_set, old_map);
+ dbug_tmp_restore_column_map(&table->write_set, old_map);
return true;
} // end of MakeKeyWhere
@@ -2799,7 +2834,6 @@ PFIL ha_connect::CondFilter(PGLOBAL g, Item *cond)
} else {
char buff[256];
String *res, tmp(buff, sizeof(buff), &my_charset_bin);
- Item_basic_constant *pval= (Item_basic_constant *)args[i];
PPARM pp= (PPARM)PlugSubAlloc(g, NULL, sizeof(PARM));
// IN and BETWEEN clauses should be col VOP list
@@ -2808,6 +2842,8 @@ PFIL ha_connect::CondFilter(PGLOBAL g, Item *cond)
switch (args[i]->real_type()) {
case COND::CONST_ITEM:
+ {
+ Item *pval= (Item *)args[i];
switch (args[i]->cmp_type()) {
case STRING_RESULT:
res= pval->val_str(&tmp);
@@ -2834,6 +2870,7 @@ PFIL ha_connect::CondFilter(PGLOBAL g, Item *cond)
DBUG_ASSERT(0);
return NULL;
}
+ }
break;
case COND::CACHE_ITEM: // Possible ???
case COND::NULL_ITEM: // TODO: handle this
@@ -3089,7 +3126,7 @@ PCFIL ha_connect::CheckCond(PGLOBAL g, PCFIL filp, const Item *cond)
} else {
char buff[256];
String *res, tmp(buff, sizeof(buff), &my_charset_bin);
- Item_basic_constant *pval= (Item_basic_constant *)args[i];
+ Item *pval= (Item *)args[i];
Item::Type type= args[i]->real_type();
switch (type) {
@@ -4501,7 +4538,10 @@ bool ha_connect::check_privileges(THD *thd, PTOS options, const char *dbn, bool
case TAB_VEC:
case TAB_REST:
case TAB_JSON:
- if (options->filename && *options->filename) {
+#if defined(BSON_SUPPORT)
+ case TAB_BSON:
+#endif // BSON_SUPPORT
+ if (options->filename && *options->filename) {
if (!quick) {
char path[FN_REFLEN], dbpath[FN_REFLEN];
@@ -4532,11 +4572,10 @@ bool ha_connect::check_privileges(THD *thd, PTOS options, const char *dbn, bool
case TAB_DIR:
case TAB_ZIP:
case TAB_OEM:
- if (table && table->pos_in_table_list) { // if SELECT
+ if (table && table->pos_in_table_list) { // if SELECT
#if MYSQL_VERSION_ID > 100200
Switch_to_definer_security_ctx backup_ctx(thd, table->pos_in_table_list);
#endif // VERSION_ID > 100200
-
return check_global_access(thd, FILE_ACL);
} else
return check_global_access(thd, FILE_ACL);
@@ -4552,9 +4591,10 @@ bool ha_connect::check_privileges(THD *thd, PTOS options, const char *dbn, bool
case TAB_OCCUR:
case TAB_PIVOT:
case TAB_VIR:
+ default:
// This is temporary until a solution is found
return false;
- } // endswitch type
+ } // endswitch type
my_printf_error(ER_UNKNOWN_ERROR, "check_privileges failed", MYF(0));
return true;
@@ -4805,6 +4845,7 @@ int ha_connect::start_stmt(THD *thd, thr_lock_type lock_type)
lock.cc by lock_external() and unlock_external() in lock.cc;
the section "locking functions for mysql" in lock.cc;
copy_data_between_tables() in sql_table.cc.
+
*/
int ha_connect::external_lock(THD *thd, int lock_type)
{
@@ -4937,11 +4978,11 @@ int ha_connect::external_lock(THD *thd, int lock_type)
// Here we do make the new indexes
if (tdp->MakeIndex(g, adp, true) == RC_FX) {
// Make it a warning to avoid crash
- push_warning(thd, Sql_condition::WARN_LEVEL_WARN,
- 0, g->Message);
- rc= 0;
- //my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0));
- //rc= HA_ERR_INTERNAL_ERROR;
+ //push_warning(thd, Sql_condition::WARN_LEVEL_WARN,
+ // 0, g->Message);
+ //rc= 0;
+ my_message(ER_TOO_MANY_KEYS, g->Message, MYF(0));
+ rc= HA_ERR_INDEX_CORRUPT;
} // endif MakeIndex
} else if (tdbp->GetDef()->Indexable() == 3) {
@@ -5351,7 +5392,8 @@ static char *encode(PGLOBAL g, const char *cnm)
*/
static bool add_field(String* sql, TABTYPE ttp, const char* field_name, int typ,
int len, int dec, char* key, uint tm, const char* rem,
- char* dft, char* xtra, char* fmt, int flag, bool dbf, char v) {
+ char* dft, char* xtra, char* fmt, int flag, bool dbf, char v)
+{
#if defined(DEVELOPMENT)
// Some client programs regard CHAR(36) as GUID
char var = (len > 255 || len == 36) ? 'V' : v;
@@ -5428,7 +5470,10 @@ static bool add_field(String* sql, TABTYPE ttp, const char* field_name, int typ,
if (fmt && *fmt) {
switch (ttp) {
case TAB_JSON: error |= sql->append(" JPATH='"); break;
- case TAB_XML: error |= sql->append(" XPATH='"); break;
+#if defined(BSON_SUPPORT)
+ case TAB_BSON: error |= sql->append(" JPATH='"); break;
+#endif // BSON_SUPPORT
+ case TAB_XML: error |= sql->append(" XPATH='"); break;
default: error |= sql->append(" FIELD_FORMAT='");
} // endswitch ttp
@@ -5593,8 +5638,8 @@ static int connect_assisted_discovery(handlerton *, THD* thd,
String sql(buf, sizeof(buf), system_charset_info);
sql.copy(STRING_WITH_LEN("CREATE TABLE whatever ("), system_charset_info);
- user = host = pwd = tbl = src = col = ocl = pic = fcl = skc = rnk = zfn = NULL;
- dsn = url = NULL;
+ user= host= pwd= tbl= src= col= ocl= pic= fcl= skc= rnk= zfn= NULL;
+ dsn= url= NULL;
// Get the useful create options
ttp= GetTypeID(topt->type);
@@ -5655,7 +5700,7 @@ static int connect_assisted_discovery(handlerton *, THD* thd,
try {
// Check table type
- if (ttp == TAB_UNDEF) {
+ if (ttp == TAB_UNDEF && !topt->http) {
topt->type= (src) ? "MYSQL" : (tab) ? "PROXY" : "DOS";
ttp= GetTypeID(topt->type);
sprintf(g->Message, "No table_type. Was set to %s", topt->type);
@@ -5666,11 +5711,21 @@ static int connect_assisted_discovery(handlerton *, THD* thd,
goto err;
#if defined(REST_SUPPORT)
} else if (topt->http) {
- switch (ttp) {
+ if (ttp == TAB_UNDEF) {
+ topt->type = "JSON";
+ ttp= GetTypeID(topt->type);
+ sprintf(g->Message, "No table_type. Was set to %s", topt->type);
+ push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, g->Message);
+ } // endif ttp
+
+ switch (ttp) {
case TAB_JSON:
- case TAB_XML:
+#if defined(BSON_SUPPORT)
+ case TAB_BSON:
+#endif // BSON_SUPPORT
+ case TAB_XML:
case TAB_CSV:
- ttp = TAB_REST;
+ ttp = TAB_REST;
break;
default:
break;
@@ -5853,7 +5908,10 @@ static int connect_assisted_discovery(handlerton *, THD* thd,
case TAB_XML:
#endif // LIBXML2_SUPPORT || DOMDOC_SUPPORT
case TAB_JSON:
- dsn= strz(g, create_info->connect_string);
+#if defined(BSON_SUPPORT)
+ case TAB_BSON:
+#endif // BSON_SUPPORT
+ dsn= strz(g, create_info->connect_string);
if (!fn && !zfn && !mul && !dsn)
sprintf(g->Message, "Missing %s file name", topt->type);
@@ -6017,8 +6075,15 @@ static int connect_assisted_discovery(handlerton *, THD* thd,
qrp= VirColumns(g, fnc == FNC_COL);
break;
case TAB_JSON:
+#if !defined(FORCE_BSON)
qrp= JSONColumns(g, db, dsn, topt, fnc == FNC_COL);
break;
+#endif // !FORCE_BSON
+#if defined(BSON_SUPPORT)
+ case TAB_BSON:
+ qrp= BSONColumns(g, db, dsn, topt, fnc == FNC_COL);
+ break;
+#endif // BSON_SUPPORT
#if defined(JAVA_SUPPORT)
case TAB_MONGO:
url= strz(g, create_info->connect_string);
@@ -6083,6 +6148,10 @@ static int connect_assisted_discovery(handlerton *, THD* thd,
goto err;
} // endif !nblin
+ // Restore language type
+ if (ttp == TAB_REST)
+ ttp = GetTypeID(topt->type);
+
for (i= 0; !rc && i < qrp->Nblin; i++) {
typ= len= prec= dec= flg= 0;
tm= NOT_NULL_FLAG;
@@ -6258,7 +6327,7 @@ static int connect_assisted_discovery(handlerton *, THD* thd,
// Now add the field
if (add_field(&sql, ttp, cnm, typ, prec, dec, key, tm, rem, dft, xtra,
- fmt, flg, dbf, v))
+ fmt, flg, dbf, v))
rc= HA_ERR_OUT_OF_MEM;
} // endfor i
@@ -6382,6 +6451,9 @@ int ha_connect::create(const char *name, TABLE *table_arg,
// Check table type
if (type == TAB_UNDEF) {
options->type= (options->srcdef) ? "MYSQL" :
+#if defined(REST_SUPPORT)
+ (options->http) ? "JSON" :
+#endif // REST_SUPPORT
(options->tabname) ? "PROXY" : "DOS";
type= GetTypeID(options->type);
sprintf(g->Message, "No table_type. Will be set to %s", options->type);
@@ -6399,7 +6471,7 @@ int ha_connect::create(const char *name, TABLE *table_arg,
DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
inward= IsFileType(type) && !options->filename &&
- (type != TAB_JSON || !cnc.length);
+ ((type != TAB_JSON && type != TAB_BSON) || !cnc.length);
if (options->data_charset) {
const CHARSET_INFO *data_charset;
@@ -6757,8 +6829,8 @@ int ha_connect::create(const char *name, TABLE *table_arg,
if (trace(1))
htrc("xchk=%p createas=%d\n", g->Xchk, g->Createas);
-#if defined(ZIP_SUPPORT)
if (options->zipped) {
+#if defined(ZIP_SUPPORT)
// Check whether the zip entry must be made from a file
PCSZ fn= GetListOption(g, "Load", options->oplist, NULL);
@@ -6780,9 +6852,11 @@ int ha_connect::create(const char *name, TABLE *table_arg,
} // endif LoadFile
} // endif fn
-
+#else // !ZIP_SUPPORT
+ my_message(ER_UNKNOWN_ERROR, "Option ZIP not supported", MYF(0));
+ DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
+#endif // !ZIP_SUPPORT
} // endif zipped
-#endif // ZIP_SUPPORT
// To check whether indexes have to be made or remade
if (!g->Xchk) {
@@ -7384,7 +7458,8 @@ static struct st_mysql_sys_var* connect_system_variables[]= {
MYSQL_SYSVAR(json_null),
MYSQL_SYSVAR(json_all_path),
MYSQL_SYSVAR(default_depth),
- MYSQL_SYSVAR(json_grp_size),
+ MYSQL_SYSVAR(default_prec),
+ MYSQL_SYSVAR(json_grp_size),
#if defined(JAVA_SUPPORT)
MYSQL_SYSVAR(jvm_path),
MYSQL_SYSVAR(class_path),
@@ -7394,7 +7469,10 @@ static struct st_mysql_sys_var* connect_system_variables[]= {
MYSQL_SYSVAR(enable_mongo),
#endif // JAVA_SUPPORT || CMGO_SUPPORT
MYSQL_SYSVAR(cond_push),
- NULL
+#if defined(BSON_SUPPORT)
+ MYSQL_SYSVAR(force_bson),
+#endif // BSON_SUPPORT
+ NULL
};
maria_declare_plugin(connect)
diff --git a/storage/connect/jdbconn.cpp b/storage/connect/jdbconn.cpp
index 2dab385a36f..2cb75e0adc1 100644
--- a/storage/connect/jdbconn.cpp
+++ b/storage/connect/jdbconn.cpp
@@ -766,6 +766,7 @@ void JDBConn::AddJars(PSTRG jpop, char sep)
/***********************************************************************/
bool JDBConn::Connect(PJPARM sop)
{
+ int irc = RC_FX;
bool err = false;
jint rc;
jboolean jt = (trace(1));
diff --git a/storage/connect/jmgfam.cpp b/storage/connect/jmgfam.cpp
index 30f6279146d..2d45753ec63 100644
--- a/storage/connect/jmgfam.cpp
+++ b/storage/connect/jmgfam.cpp
@@ -1,15 +1,15 @@
/************ JMONGO FAM C++ Program Source Code File (.CPP) ***********/
/* PROGRAM NAME: jmgfam.cpp */
/* ------------- */
-/* Version 1.0 */
+/* Version 1.1 */
/* */
/* COPYRIGHT: */
/* ---------- */
-/* (C) Copyright to the author Olivier BERTRAND 20017 */
+/*  (C) Copyright to the author Olivier BERTRAND 2017 - 2020          */
/* */
/* WHAT THIS PROGRAM DOES: */
/* ----------------------- */
-/* This program are the Java MongoDB access method classes. */
+/*  This program implements the Java MongoDB access method classes.   */
/* */
/***********************************************************************/
@@ -49,7 +49,11 @@
#include "reldef.h"
#include "filamtxt.h"
#include "tabdos.h"
+#if defined(BSON_SUPPORT)
+#include "tabbson.h"
+#else
#include "tabjson.h"
+#endif // BSON_SUPPORT
#include "jmgfam.h"
#if defined(UNIX) || defined(UNIV_LINUX)
@@ -92,10 +96,38 @@ JMGFAM::JMGFAM(PJDEF tdp) : DOSFAM((PDOSDEF)NULL)
Version = tdp->Version;
Lrecl = tdp->Lrecl + tdp->Ending;
Curpos = 0;
-} // end of JMGFAM standard constructor
+} // end of JMGFAM Json standard constructor
+
+#if defined(BSON_SUPPORT)
+JMGFAM::JMGFAM(PBDEF tdp) : DOSFAM((PDOSDEF)NULL)
+{
+ Jcp = NULL;
+ Ops.Driver = tdp->Schema;
+ Ops.Url = tdp->Uri;
+ Ops.User = NULL;
+ Ops.Pwd = NULL;
+ Ops.Scrollable = false;
+ Ops.Fsize = 0;
+ Ops.Version = tdp->Version;
+ To_Fbt = NULL;
+ Mode = MODE_ANY;
+ Uristr = tdp->Uri;
+ Db_name = tdp->Schema;
+ Coll_name = tdp->Collname;
+ Options = tdp->Options;
+ Filter = tdp->Filter;
+ Wrapname = tdp->Wrapname;
+ Done = false;
+ Pipe = tdp->Pipe;
+ Version = tdp->Version;
+ Lrecl = tdp->Lrecl + tdp->Ending;
+ Curpos = 0;
+} // end of JMGFAM Bson standard constructor
+#endif // BSON_SUPPORT
JMGFAM::JMGFAM(PJMGFAM tdfp) : DOSFAM(tdfp)
{
+ Jcp = tdfp->Jcp;
//Client = tdfp->Client;
//Database = NULL;
//Collection = tdfp->Collection;
@@ -114,6 +146,7 @@ JMGFAM::JMGFAM(PJMGFAM tdfp) : DOSFAM(tdfp)
Done = tdfp->Done;
Pipe = tdfp->Pipe;
Version = tdfp->Version;
+ Curpos = tdfp->Curpos;
} // end of JMGFAM copy constructor
/***********************************************************************/
diff --git a/storage/connect/jmgfam.h b/storage/connect/jmgfam.h
index 5c80d993833..c5d9d1f57e6 100644
--- a/storage/connect/jmgfam.h
+++ b/storage/connect/jmgfam.h
@@ -1,7 +1,7 @@
/************** MongoFam H Declares Source Code File (.H) **************/
-/* Name: jmgfam.h Version 1.0 */
+/* Name: jmgfam.h Version 1.1 */
/* */
-/* (C) Copyright to the author Olivier BERTRAND 2017 */
+/* (C) Copyright to the author Olivier BERTRAND 2017 - 2020 */
/* */
/* This file contains the JAVA MongoDB access method classes declares */
/***********************************************************************/
@@ -25,6 +25,9 @@ class DllExport JMGFAM : public DOSFAM {
public:
// Constructor
JMGFAM(PJDEF tdp);
+#if defined(BSON_SUPPORT)
+ JMGFAM(PBDEF tdp);
+#endif // BSON_SUPPORT
JMGFAM(PJMGFAM txfp);
// Implementation
diff --git a/storage/connect/jmgoconn.cpp b/storage/connect/jmgoconn.cpp
index c80800bd897..8a12fffbd05 100644
--- a/storage/connect/jmgoconn.cpp
+++ b/storage/connect/jmgoconn.cpp
@@ -121,7 +121,7 @@ JMgoConn::JMgoConn(PGLOBAL g, PCSZ collname, PCSZ wrapper)
/***********************************************************************/
void JMgoConn::AddJars(PSTRG jpop, char sep)
{
-#if defined(DEVELOPMENT)
+#if defined(BSON_SUPPORT)
if (m_Version == 2) {
jpop->Append(sep);
// jpop->Append("C:/Eclipse/workspace/MongoWrap2/bin");
@@ -134,7 +134,7 @@ void JMgoConn::AddJars(PSTRG jpop, char sep)
jpop->Append(sep);
jpop->Append("C:/mongo-java-driver/mongo-java-driver-3.4.2.jar");
} // endif m_Version
-#endif // DEVELOPMENT
+#endif // BSON_SUPPORT
} // end of AddJars
/***********************************************************************/
diff --git a/storage/connect/json.cpp b/storage/connect/json.cpp
index f6dca8146d6..bd9c4fac7a1 100644
--- a/storage/connect/json.cpp
+++ b/storage/connect/json.cpp
@@ -1,7 +1,7 @@
/*************** json CPP Declares Source Code File (.H) ***************/
-/* Name: json.cpp Version 1.4 */
+/* Name: json.cpp Version 1.5 */
/* */
-/* (C) Copyright to the author Olivier BERTRAND 2014 - 2017 */
+/* (C) Copyright to the author Olivier BERTRAND 2014 - 2020 */
/* */
/* This file contains the JSON classes functions. */
/***********************************************************************/
@@ -21,7 +21,7 @@
#include "plgdbsem.h"
#include "json.h"
-#define ARGS MY_MIN(24,len-i),s+MY_MAX(i-3,0)
+#define ARGS MY_MIN(24,(int)len-i),s+MY_MAX(i-3,0)
#if defined(__WIN__)
#define EL "\r\n"
@@ -38,16 +38,16 @@
class SE_Exception {
public:
- SE_Exception(unsigned int n, PEXCEPTION_RECORD p) : nSE(n), eRec(p) {}
- ~SE_Exception() {}
+ SE_Exception(unsigned int n, PEXCEPTION_RECORD p) : nSE(n), eRec(p) {}
+ ~SE_Exception() {}
- unsigned int nSE;
- PEXCEPTION_RECORD eRec;
+ unsigned int nSE;
+ PEXCEPTION_RECORD eRec;
}; // end of class SE_Exception
void trans_func(unsigned int u, _EXCEPTION_POINTERS* pExp)
{
- throw SE_Exception(u, pExp->ExceptionRecord);
+ throw SE_Exception(u, pExp->ExceptionRecord);
} // end of trans_func
char *GetExceptionDesc(PGLOBAL g, unsigned int e);
@@ -58,46 +58,60 @@ char *GetJsonNull(void);
/***********************************************************************/
/* IsNum: check whether this string is all digits. */
/***********************************************************************/
-bool IsNum(PSZ s)
-{
- for (char *p = s; *p; p++)
- if (*p == ']')
- break;
- else if (!isdigit(*p) || *p == '-')
- return false;
+bool IsNum(PSZ s) {
+ for (char* p = s; *p; p++)
+ if (*p == ']')
+ break;
+ else if (!isdigit(*p) || *p == '-')
+ return false;
- return true;
-} // end of IsNum
+ return true;
+} // end of IsNum
/***********************************************************************/
/* NextChr: return the first found '[' or Sep pointer. */
/***********************************************************************/
-char *NextChr(PSZ s, char sep)
+char* NextChr(PSZ s, char sep)
{
- char *p1 = strchr(s, '[');
- char *p2 = strchr(s, sep);
+ char* p1 = strchr(s, '[');
+ char* p2 = strchr(s, sep);
- if (!p2)
- return p1;
- else if (p1)
- return MY_MIN(p1, p2);
+ if (!p2)
+ return p1;
+ else if (p1)
+ return MY_MIN(p1, p2);
- return p2;
-} // end of NextChr
+ return p2;
+} // end of NextChr
+#if 0
+/***********************************************************************/
+/* Allocate a VAL structure, make sure common field and Nd are zeroed. */
+/***********************************************************************/
+PVL AllocVal(PGLOBAL g, JTYP type)
+{
+ PVL vlp = (PVL)PlugSubAlloc(g, NULL, sizeof(VAL));
+
+ vlp->LLn = 0;
+ vlp->Nd = 0;
+ vlp->Type = type;
+ return vlp;
+} // end of AllocVal
+#endif // 0
/***********************************************************************/
/* Parse a json string. */
/* Note: when pretty is not known, the caller sets pretty to 3. */
/***********************************************************************/
-PJSON ParseJson(PGLOBAL g, char *s, int len, int *ptyp, bool *comma)
+PJSON ParseJson(PGLOBAL g, char* s, size_t len, int* ptyp, bool* comma)
{
- int i, pretty = (ptyp) ? *ptyp : 3;
- bool b = false, pty[3] = {true,true,true};
- PJSON jsp = NULL, jp = NULL;
+ int i, pretty = (ptyp) ? *ptyp : 3;
+ bool b = false, pty[3] = { true,true,true };
+ PJSON jsp = NULL;
+ PJDOC jdp = NULL;
- if (trace(1))
- htrc("ParseJson: s=%.10s len=%d\n", s, len);
+ if (trace(1))
+ htrc("ParseJson: s=%.10s len=%zd\n", s, len);
if (!s || !len) {
strcpy(g->Message, "Void JSON object");
@@ -105,116 +119,402 @@ PJSON ParseJson(PGLOBAL g, char *s, int len, int *ptyp, bool *comma)
} else if (comma)
*comma = false;
- // Trying to guess the pretty format
- if (s[0] == '[' && (s[1] == '\n' || (s[1] == '\r' && s[2] == '\n')))
- pty[0] = false;
-
- try {
- jp = new(g) JSON();
- jp->s = s;
- jp->len = len;
- jp->pty = pty;
-
- for (i = 0; i < jp->len; i++)
- switch (s[i]) {
- case '[':
- if (jsp)
- jsp = jp->ParseAsArray(g, i, pretty, ptyp);
- else
- jsp = jp->ParseArray(g, ++i);
-
- break;
- case '{':
- if (jsp)
- jsp = jp->ParseAsArray(g, i, pretty, ptyp);
- else if (!(jsp = jp->ParseObject(g, ++i)))
- throw 2;
-
- break;
- case ' ':
- case '\t':
- case '\n':
- case '\r':
- break;
- case ',':
- if (jsp && (pretty == 1 || pretty == 3)) {
- if (comma)
- *comma = true;
-
- pty[0] = pty[2] = false;
- break;
- } // endif pretty
-
- sprintf(g->Message, "Unexpected ',' (pretty=%d)", pretty);
- throw 3;
- case '(':
- b = true;
- break;
- case ')':
- if (b) {
- b = false;
- break;
- } // endif b
- /* falls through */
- default:
- if (jsp)
- jsp = jp->ParseAsArray(g, i, pretty, ptyp);
- else if (!(jsp = jp->ParseValue(g, i)))
- throw 4;
-
- break;
- }; // endswitch s[i]
-
- if (!jsp)
- sprintf(g->Message, "Invalid Json string '%.*s'", MY_MIN(len, 50), s);
- else if (ptyp && pretty == 3) {
- *ptyp = 3; // Not recognized pretty
-
- for (i = 0; i < 3; i++)
- if (pty[i]) {
- *ptyp = i;
- break;
- } // endif pty
-
- } // endif ptyp
-
- } catch (int n) {
- if (trace(1))
- htrc("Exception %d: %s\n", n, g->Message);
- jsp = NULL;
- } catch (const char *msg) {
- strcpy(g->Message, msg);
- jsp = NULL;
- } // end catch
-
- return jsp;
+ // Trying to guess the pretty format
+ if (s[0] == '[' && (s[1] == '\n' || (s[1] == '\r' && s[2] == '\n')))
+ pty[0] = false;
+
+ try {
+ jdp = new(g) JDOC;
+ jdp->s = s;
+ jdp->len = len;
+ jdp->pty = pty;
+
+ for (i = 0; i < jdp->len; i++)
+ switch (s[i]) {
+ case '[':
+ if (jsp)
+ jsp = jdp->ParseAsArray(g, i, pretty, ptyp);
+ else
+ jsp = jdp->ParseArray(g, ++i);
+
+ break;
+ case '{':
+ if (jsp)
+ jsp = jdp->ParseAsArray(g, i, pretty, ptyp);
+ else if (!(jsp = jdp->ParseObject(g, ++i)))
+ throw 2;
+
+ break;
+ case ' ':
+ case '\t':
+ case '\n':
+ case '\r':
+ break;
+ case ',':
+ if (jsp && (pretty == 1 || pretty == 3)) {
+ if (comma)
+ *comma = true;
+
+ pty[0] = pty[2] = false;
+ break;
+ } // endif pretty
+
+ sprintf(g->Message, "Unexpected ',' (pretty=%d)", pretty);
+ throw 3;
+ case '(':
+ b = true;
+ break;
+ case ')':
+ if (b) {
+ b = false;
+ break;
+ } // endif b
+ /* falls through */
+ default:
+ if (jsp)
+ jsp = jdp->ParseAsArray(g, i, pretty, ptyp);
+ else if (!(jsp = jdp->ParseValue(g, i)))
+ throw 4;
+
+ break;
+ }; // endswitch s[i]
+
+ if (!jsp)
+ sprintf(g->Message, "Invalid Json string '%.*s'", MY_MIN((int)len, 50), s);
+ else if (ptyp && pretty == 3) {
+ *ptyp = 3; // Not recognized pretty
+
+ for (i = 0; i < 3; i++)
+ if (pty[i]) {
+ *ptyp = i;
+ break;
+ } // endif pty
+
+ } // endif ptyp
+
+ } catch (int n) {
+ if (trace(1))
+ htrc("Exception %d: %s\n", n, g->Message);
+ jsp = NULL;
+ } catch (const char* msg) {
+ strcpy(g->Message, msg);
+ jsp = NULL;
+ } // end catch
+
+ return jsp;
} // end of ParseJson
/***********************************************************************/
+/* Serialize a JSON document tree: */
+/***********************************************************************/
+PSZ Serialize(PGLOBAL g, PJSON jsp, char* fn, int pretty) {
+ PSZ str = NULL;
+ bool b = false, err = true;
+ JOUT* jp;
+ FILE* fs = NULL;
+ PJDOC jdp = NULL;
+
+ g->Message[0] = 0;
+
+ try {
+ jdp = new(g) JDOC; // MUST BE ALLOCATED BEFORE jp !!!!!
+
+ if (!jsp) {
+ strcpy(g->Message, "Null json tree");
+ throw 1;
+ } else if (!fn) {
+ // Serialize to a string
+ jp = new(g) JOUTSTR(g);
+ b = pretty == 1;
+ } else {
+ if (!(fs = fopen(fn, "wb"))) {
+ sprintf(g->Message, MSG(OPEN_MODE_ERROR),
+ "w", (int)errno, fn);
+ strcat(strcat(g->Message, ": "), strerror(errno));
+ throw 2;
+ } else if (pretty >= 2) {
+ // Serialize to a pretty file
+ jp = new(g)JOUTPRT(g, fs);
+ } else {
+ // Serialize to a flat file
+ b = true;
+ jp = new(g)JOUTFILE(g, fs, pretty);
+ } // endif's
+
+ } // endif's
+
+ jdp->SetJp(jp);
+
+ switch (jsp->GetType()) {
+ case TYPE_JAR:
+ err = jdp->SerializeArray((PJAR)jsp, b);
+ break;
+ case TYPE_JOB:
+ err = ((b && jp->Prty()) && jp->WriteChr('\t'));
+ err |= jdp->SerializeObject((PJOB)jsp);
+ break;
+ case TYPE_JVAL:
+ err = jdp->SerializeValue((PJVAL)jsp);
+ break;
+ default:
+ strcpy(g->Message, "Invalid json tree");
+ } // endswitch Type
+
+ if (fs) {
+ fputs(EL, fs);
+ fclose(fs);
+ str = (err) ? NULL : strcpy(g->Message, "Ok");
+ } else if (!err) {
+ str = ((JOUTSTR*)jp)->Strp;
+ jp->WriteChr('\0');
+ PlugSubAlloc(g, NULL, ((JOUTSTR*)jp)->N);
+ } else {
+ if (!g->Message[0])
+ strcpy(g->Message, "Error in Serialize");
+
+ } // endif's
+
+ } catch (int n) {
+ if (trace(1))
+ htrc("Exception %d: %s\n", n, g->Message);
+ str = NULL;
+ } catch (const char* msg) {
+ strcpy(g->Message, msg);
+ str = NULL;
+ } // end catch
+
+ return str;
+} // end of Serialize
+
+
+/* -------------------------- Class JOUTSTR -------------------------- */
+
+/***********************************************************************/
+/* JOUTSTR constructor. */
+/***********************************************************************/
+JOUTSTR::JOUTSTR(PGLOBAL g) : JOUT(g) {
+ PPOOLHEADER pph = (PPOOLHEADER)g->Sarea;
+
+ N = 0;
+ Max = pph->FreeBlk;
+ Max = (Max > 32) ? Max - 32 : Max;
+ Strp = (char*)PlugSubAlloc(g, NULL, 0); // Size not known yet
+} // end of JOUTSTR constructor
+
+/***********************************************************************/
+/* Concatenate a string to the Serialize string. */
+/***********************************************************************/
+bool JOUTSTR::WriteStr(const char* s) {
+ if (s) {
+ size_t len = strlen(s);
+
+ if (N + len > Max)
+ return true;
+
+ memcpy(Strp + N, s, len);
+ N += len;
+ return false;
+ } else
+ return true;
+
+} // end of WriteStr
+
+/***********************************************************************/
+/* Concatenate a character to the Serialize string. */
+/***********************************************************************/
+bool JOUTSTR::WriteChr(const char c) {
+ if (N + 1 > Max)
+ return true;
+
+ Strp[N++] = c;
+ return false;
+} // end of WriteChr
+
+/***********************************************************************/
+/* Escape and Concatenate a string to the Serialize string. */
+/***********************************************************************/
+bool JOUTSTR::Escape(const char* s)
+{
+ if (s) {
+ WriteChr('"');
+
+ for (unsigned int i = 0; s[i]; i++)
+ switch (s[i]) {
+ case '"':
+ case '\\':
+ case '\t':
+ case '\n':
+ case '\r':
+ case '\b':
+ case '\f': WriteChr('\\');
+ // fall through
+ default:
+ WriteChr(s[i]);
+ break;
+ } // endswitch s[i]
+
+ WriteChr('"');
+ } else
+ WriteStr("null");
+
+ return false;
+} // end of Escape
+
+/* ------------------------- Class JOUTFILE -------------------------- */
+
+/***********************************************************************/
+/* Write a string to the Serialize file. */
+/***********************************************************************/
+bool JOUTFILE::WriteStr(const char* s)
+{
+ // This is temporary
+ fputs(s, Stream);
+ return false;
+} // end of WriteStr
+
+/***********************************************************************/
+/* Write a character to the Serialize file. */
+/***********************************************************************/
+bool JOUTFILE::WriteChr(const char c)
+{
+ // This is temporary
+ fputc(c, Stream);
+ return false;
+} // end of WriteChr
+
+/***********************************************************************/
+/* Escape and Concatenate a string to the Serialize string. */
+/***********************************************************************/
+bool JOUTFILE::Escape(const char* s)
+{
+ // This is temporary
+ if (s) {
+ fputc('"', Stream);
+
+ for (unsigned int i = 0; s[i]; i++)
+ switch (s[i]) {
+ case '"': fputs("\\\"", Stream); break;
+ case '\\': fputs("\\\\", Stream); break;
+ case '\t': fputs("\\t", Stream); break;
+ case '\n': fputs("\\n", Stream); break;
+ case '\r': fputs("\\r", Stream); break;
+ case '\b': fputs("\\b", Stream); break;
+ case '\f': fputs("\\f", Stream); break;
+ default:
+ fputc(s[i], Stream);
+ break;
+ } // endswitch s[i]
+
+ fputc('"', Stream);
+ } else
+ fputs("null", Stream);
+
+ return false;
+} // end of Escape
+
+/* ------------------------- Class JOUTPRT --------------------------- */
+
+/***********************************************************************/
+/* Write a string to the Serialize pretty file. */
+/***********************************************************************/
+bool JOUTPRT::WriteStr(const char* s)
+{
+ // This is temporary
+ if (B) {
+ fputs(EL, Stream);
+ M--;
+
+ for (int i = 0; i < M; i++)
+ fputc('\t', Stream);
+
+ B = false;
+ } // endif B
+
+ fputs(s, Stream);
+ return false;
+} // end of WriteStr
+
+/***********************************************************************/
+/* Write a character to the Serialize pretty file. */
+/***********************************************************************/
+bool JOUTPRT::WriteChr(const char c)
+{
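+ // M is the current indentation depth; B notes that a closing bracket was just written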
+ switch (c) {
+ case ':':
+ fputs(": ", Stream);
+ break;
+ case '{':
+ case '[':
+#if 0
+ if (M)
+ fputs(EL, Stream);
+
+ for (int i = 0; i < M; i++)
+ fputc('\t', Stream);
+#endif // 0
+
+ fputc(c, Stream);
+ fputs(EL, Stream);
+ M++;
+
+ for (int i = 0; i < M; i++)
+ fputc('\t', Stream);
+
+ break;
+ case '}':
+ case ']':
+ M--;
+ fputs(EL, Stream);
+
+ for (int i = 0; i < M; i++)
+ fputc('\t', Stream);
+
+ fputc(c, Stream);
+ B = true;
+ break;
+ case ',':
+ fputc(c, Stream);
+ fputs(EL, Stream);
+
+ for (int i = 0; i < M; i++)
+ fputc('\t', Stream);
+
+ B = false;
+ break;
+ default:
+ fputc(c, Stream);
+ } // endswitch c
+
+ return false;
+} // end of WriteChr
+
+/* --------------------------- Class JDOC ---------------------------- */
+
+/***********************************************************************/
/* Parse several items as being in an array. */
/***********************************************************************/
-PJAR JSON::ParseAsArray(PGLOBAL g, int& i, int pretty, int *ptyp)
+PJAR JDOC::ParseAsArray(PGLOBAL g, int& i, int pretty, int *ptyp)
{
- if (pty[0] && (!pretty || pretty > 2)) {
- PJAR jsp;
+ if (pty[0] && (!pretty || pretty > 2)) {
+ PJAR jsp;
- if ((jsp = ParseArray(g, (i = 0))) && ptyp && pretty == 3)
- *ptyp = (pty[0]) ? 0 : 3;
+ if ((jsp = ParseArray(g, (i = 0))) && ptyp && pretty == 3)
+ *ptyp = (pty[0]) ? 0 : 3;
- return jsp;
- } else
- strcpy(g->Message, "More than one item in file");
+ return jsp;
+ } else
+ strcpy(g->Message, "More than one item in file");
- return NULL;
+ return NULL;
} // end of ParseAsArray
/***********************************************************************/
/* Parse a JSON Array. */
/***********************************************************************/
-PJAR JSON::ParseArray(PGLOBAL g, int& i)
+PJAR JDOC::ParseArray(PGLOBAL g, int& i)
{
- int level = 0;
- bool b = (!i);
+ int level = 0;
+ bool b = (!i);
PJAR jarp = new(g) JARRAY;
for (; i < len; i++)
@@ -235,11 +535,11 @@ PJAR JSON::ParseArray(PGLOBAL g, int& i)
jarp->InitArray(g);
return jarp;
- case '\n':
- if (!b)
- pty[0] = pty[1] = false;
- case '\r':
- case ' ':
+ case '\n':
+ if (!b)
+ pty[0] = pty[1] = false;
+ case '\r':
+ case ' ':
case '\t':
break;
default:
@@ -247,17 +547,17 @@ PJAR JSON::ParseArray(PGLOBAL g, int& i)
sprintf(g->Message, "Unexpected value near %.*s", ARGS);
throw 1;
} else
- jarp->AddValue(g, ParseValue(g, i));
+ jarp->AddArrayValue(g, ParseValue(g, i));
level = (b) ? 1 : 2;
break;
}; // endswitch s[i]
- if (b) {
- // Case of Pretty == 0
- jarp->InitArray(g);
- return jarp;
- } // endif b
+ if (b) {
+ // Case of Pretty == 0
+ jarp->InitArray(g);
+ return jarp;
+ } // endif b
throw ("Unexpected EOF in array");
} // end of ParseArray
@@ -265,10 +565,10 @@ PJAR JSON::ParseArray(PGLOBAL g, int& i)
/***********************************************************************/
/* Parse a JSON Object. */
/***********************************************************************/
-PJOB JSON::ParseObject(PGLOBAL g, int& i)
+PJOB JDOC::ParseObject(PGLOBAL g, int& i)
{
PSZ key;
- int level = 0;
+ int level = -1;
PJOB jobp = new(g) JOBJECT;
PJPR jpp = NULL;
@@ -276,7 +576,7 @@ PJOB JSON::ParseObject(PGLOBAL g, int& i)
switch (s[i]) {
case '"':
if (level < 2) {
- key = ParseString(g, ++i);
+ key = ParseString(g, ++i);
jpp = jobp->AddPair(g, key);
level = 1;
} else {
@@ -287,7 +587,7 @@ PJOB JSON::ParseObject(PGLOBAL g, int& i)
break;
case ':':
if (level == 1) {
- jpp->Val = ParseValue(g, ++i);
+ jpp->Val = ParseValue(g, ++i);
level = 2;
} else {
sprintf(g->Message, "Unexpected ':' near %.*s", ARGS);
@@ -304,16 +604,16 @@ PJOB JSON::ParseObject(PGLOBAL g, int& i)
break;
case '}':
- if (level < 2) {
+ if (level == 0 || level == 1) {
sprintf(g->Message, "Unexpected '}' near %.*s", ARGS);
throw 2;
} // endif level
return jobp;
- case '\n':
- pty[0] = pty[1] = false;
- case '\r':
- case ' ':
+ case '\n':
+ pty[0] = pty[1] = false;
+ case '\r':
+ case ' ':
case '\t':
break;
default:
@@ -329,38 +629,42 @@ PJOB JSON::ParseObject(PGLOBAL g, int& i)
/***********************************************************************/
/* Parse a JSON Value. */
/***********************************************************************/
-PJVAL JSON::ParseValue(PGLOBAL g, int& i)
+PJVAL JDOC::ParseValue(PGLOBAL g, int& i)
{
- int n;
PJVAL jvp = new(g) JVALUE;
for (; i < len; i++)
- switch (s[i]) {
- case '\n':
- pty[0] = pty[1] = false;
- case '\r':
- case ' ':
- case '\t':
- break;
- default:
- goto suite;
- } // endswitch
+ switch (s[i]) {
+ case '\n':
+ pty[0] = pty[1] = false;
+ case '\r':
+ case ' ':
+ case '\t':
+ break;
+ default:
+ goto suite;
+ } // endswitch
suite:
switch (s[i]) {
case '[':
- jvp->Jsp = ParseArray(g, ++i);
+ jvp->Jsp = ParseArray(g, ++i);
+ jvp->DataType = TYPE_JSON;
break;
case '{':
- jvp->Jsp = ParseObject(g, ++i);
+ jvp->Jsp = ParseObject(g, ++i);
+ jvp->DataType = TYPE_JSON;
break;
case '"':
- jvp->Value = AllocateValue(g, ParseString(g, ++i), TYPE_STRING);
+// jvp->Val = AllocVal(g, TYPE_STRG);
+ jvp->Strp = ParseString(g, ++i);
+ jvp->DataType = TYPE_STRG;
break;
case 't':
if (!strncmp(s + i, "true", 4)) {
- n = 1;
- jvp->Value = AllocateValue(g, &n, TYPE_TINY);
+// jvp->Val = AllocVal(g, TYPE_BOOL);
+ jvp->B = true;
+ jvp->DataType = TYPE_BOOL;
i += 3;
} else
goto err;
@@ -368,24 +672,26 @@ PJVAL JSON::ParseValue(PGLOBAL g, int& i)
break;
case 'f':
if (!strncmp(s + i, "false", 5)) {
- n = 0;
- jvp->Value = AllocateValue(g, &n, TYPE_TINY);
+// jvp->Val = AllocVal(g, TYPE_BOOL);
+ jvp->B = false;
+ jvp->DataType = TYPE_BOOL;
i += 4;
} else
goto err;
break;
case 'n':
- if (!strncmp(s + i, "null", 4))
+ if (!strncmp(s + i, "null", 4)) {
+ jvp->DataType = TYPE_NULL;
i += 3;
- else
+ } else
goto err;
break;
case '-':
default:
if (s[i] == '-' || isdigit(s[i]))
- jvp->Value = ParseNumeric(g, i);
+ ParseNumeric(g, i, jvp);
else
goto err;
@@ -401,7 +707,7 @@ err:
/***********************************************************************/
/* Unescape and parse a JSON string. */
/***********************************************************************/
-char *JSON::ParseString(PGLOBAL g, int& i)
+char *JDOC::ParseString(PGLOBAL g, int& i)
{
uchar *p;
int n = 0;
@@ -488,15 +794,15 @@ char *JSON::ParseString(PGLOBAL g, int& i)
/***********************************************************************/
/* Parse a JSON numeric value. */
/***********************************************************************/
-PVAL JSON::ParseNumeric(PGLOBAL g, int& i)
+void JDOC::ParseNumeric(PGLOBAL g, int& i, PJVAL vlp)
{
char buf[50];
int n = 0;
short nd = 0;
- bool has_dot = false;
- bool has_e = false;
- bool found_digit = false;
- PVAL valp = NULL;
+ bool has_dot = false;
+ bool has_e = false;
+ bool found_digit = false;
+//PVL vlp = NULL;
for (; i < len; i++) {
switch (s[i]) {
@@ -545,15 +851,27 @@ PVAL JSON::ParseNumeric(PGLOBAL g, int& i)
if (has_dot || has_e) {
double dv = strtod(buf, NULL);
- valp = AllocateValue(g, &dv, TYPE_DOUBLE, nd);
+// vlp = AllocVal(g, TYPE_DBL);
+ vlp->F = dv;
+ vlp->Nd = nd;
+ vlp->DataType = TYPE_DBL;
} else {
long long iv = strtoll(buf, NULL, 10);
- valp = AllocateValue(g, &iv, TYPE_BIGINT);
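+ // Store the value as a 32-bit INTG when it fits, otherwise keep the 64-bit BINT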
+ if (iv > INT_MAX32 || iv < INT_MIN32) {
+// vlp = AllocVal(g, TYPE_BINT);
+ vlp->LLn = iv;
+ vlp->DataType = TYPE_BINT;
+ } else {
+// vlp = AllocVal(g, TYPE_INTG);
+ vlp->N = (int)iv;
+ vlp->DataType = TYPE_INTG;
+ } // endif iv
+
} // endif has
i--; // Unstack following character
- return valp;
+ return;
} else
throw("No digit found");
@@ -562,137 +880,59 @@ PVAL JSON::ParseNumeric(PGLOBAL g, int& i)
} // end of ParseNumeric
/***********************************************************************/
-/* Serialize a JSON tree: */
-/***********************************************************************/
-PSZ Serialize(PGLOBAL g, PJSON jsp, char *fn, int pretty)
-{
- PSZ str = NULL;
- bool b = false, err = true;
- JOUT *jp;
- FILE *fs = NULL;
-
- g->Message[0] = 0;
-
- try {
- if (!jsp) {
- strcpy(g->Message, "Null json tree");
- throw 1;
- } else if (!fn) {
- // Serialize to a string
- jp = new(g) JOUTSTR(g);
- b = pretty == 1;
- } else {
- if (!(fs = fopen(fn, "wb"))) {
- sprintf(g->Message, MSG(OPEN_MODE_ERROR),
- "w", (int)errno, fn);
- strcat(strcat(g->Message, ": "), strerror(errno));
- throw 2;
- } else if (pretty >= 2) {
- // Serialize to a pretty file
- jp = new(g)JOUTPRT(g, fs);
- } else {
- // Serialize to a flat file
- b = true;
- jp = new(g)JOUTFILE(g, fs, pretty);
- } // endif's
-
- } // endif's
-
- switch (jsp->GetType()) {
- case TYPE_JAR:
- err = SerializeArray(jp, (PJAR)jsp, b);
- break;
- case TYPE_JOB:
- err = ((b && jp->Prty()) && jp->WriteChr('\t'));
- err |= SerializeObject(jp, (PJOB)jsp);
- break;
- case TYPE_JVAL:
- err = SerializeValue(jp, (PJVAL)jsp);
- break;
- default:
- strcpy(g->Message, "Invalid json tree");
- } // endswitch Type
-
- if (fs) {
- fputs(EL, fs);
- fclose(fs);
- str = (err) ? NULL : strcpy(g->Message, "Ok");
- } else if (!err) {
- str = ((JOUTSTR*)jp)->Strp;
- jp->WriteChr('\0');
- PlugSubAlloc(g, NULL, ((JOUTSTR*)jp)->N);
- } else {
- if (!g->Message[0])
- strcpy(g->Message, "Error in Serialize");
-
- } // endif's
-
- } catch (int n) {
- if (trace(1))
- htrc("Exception %d: %s\n", n, g->Message);
- str = NULL;
- } catch (const char *msg) {
- strcpy(g->Message, msg);
- str = NULL;
- } // end catch
-
- return str;
-} // end of Serialize
-
-/***********************************************************************/
/* Serialize a JSON Array. */
/***********************************************************************/
-bool SerializeArray(JOUT *js, PJAR jarp, bool b)
+bool JDOC::SerializeArray(PJAR jarp, bool b)
{
bool first = true;
- if (b) {
- if (js->Prty()) {
- if (js->WriteChr('['))
- return true;
- else if (js->Prty() == 1 && (js->WriteStr(EL) || js->WriteChr('\t')))
- return true;
+ if (b) {
+ if (js->Prty()) {
+ if (js->WriteChr('['))
+ return true;
+ else if (js->Prty() == 1 && (js->WriteStr(EL) || js->WriteChr('\t')))
+ return true;
- } // endif Prty
+ } // endif Prty
- } else if (js->WriteChr('['))
- return true;
+ } else if (js->WriteChr('['))
+ return true;
for (int i = 0; i < jarp->size(); i++) {
if (first)
first = false;
- else if ((!b || js->Prty()) && js->WriteChr(','))
+ else if ((!b || js->Prty()) && js->WriteChr(','))
return true;
- else if (b) {
- if (js->Prty() < 2 && js->WriteStr(EL))
- return true;
- else if (js->Prty() == 1 && js->WriteChr('\t'))
- return true;
+ else if (b) {
+ if (js->Prty() < 2 && js->WriteStr(EL))
+ return true;
+ else if (js->Prty() == 1 && js->WriteChr('\t'))
+ return true;
- } // endif b
+ } // endif b
- if (SerializeValue(js, jarp->GetValue(i)))
+ if (SerializeValue(jarp->GetArrayValue(i)))
return true;
} // endfor i
- if (b && js->Prty() == 1 && js->WriteStr(EL))
+ if (b && js->Prty() == 1 && js->WriteStr(EL))
return true;
- return ((!b || js->Prty()) && js->WriteChr(']'));
+ return ((!b || js->Prty()) && js->WriteChr(']'));
} // end of SerializeArray
/***********************************************************************/
/* Serialize a JSON Object. */
/***********************************************************************/
-bool SerializeObject(JOUT *js, PJOB jobp)
+bool JDOC::SerializeObject(PJOB jobp)
{
bool first = true;
if (js->WriteChr('{'))
return true;
- for (PJPR pair = jobp->First; pair; pair = pair->Next) {
+ for (PJPR pair = jobp->GetFirst(); pair; pair = pair->Next) {
if (first)
first = false;
else if (js->WriteChr(','))
@@ -702,7 +942,7 @@ bool SerializeObject(JOUT *js, PJOB jobp)
js->WriteStr(pair->Key) ||
js->WriteChr('"') ||
js->WriteChr(':') ||
- SerializeValue(js, pair->Val))
+ SerializeValue(pair->Val))
return true;
} // endfor i
@@ -713,259 +953,70 @@ bool SerializeObject(JOUT *js, PJOB jobp)
/***********************************************************************/
/* Serialize a JSON Value. */
/***********************************************************************/
-bool SerializeValue(JOUT *js, PJVAL jvp)
+bool JDOC::SerializeValue(PJVAL jvp)
{
+ char buf[64];
PJAR jap;
PJOB jop;
- PVAL valp;
+ //PVL vlp;
if ((jap = jvp->GetArray()))
- return SerializeArray(js, jap, false);
+ return SerializeArray(jap, false);
else if ((jop = jvp->GetObject()))
- return SerializeObject(js, jop);
- else if (!(valp = jvp->Value) || valp->IsNull())
- return js->WriteStr("null");
- else switch (valp->GetType()) {
- case TYPE_TINY:
- return js->WriteStr(valp->GetTinyValue() ? "true" : "false");
- case TYPE_STRING:
- return js->Escape(valp->GetCharValue());
+ return SerializeObject(jop);
+//else if (!(vlp = jvp->Val))
+// return js->WriteStr("null");
+ else switch (jvp->DataType) {
+ case TYPE_BOOL:
+ return js->WriteStr(jvp->B ? "true" : "false");
+ case TYPE_STRG:
+ case TYPE_DTM:
+ return js->Escape(jvp->Strp);
+ case TYPE_INTG:
+ sprintf(buf, "%d", jvp->N);
+ return js->WriteStr(buf);
+ case TYPE_BINT:
+ sprintf(buf, "%lld", jvp->LLn);
+ return js->WriteStr(buf);
+ case TYPE_DBL:
+ sprintf(buf, "%.*lf", jvp->Nd, jvp->F);
+ return js->WriteStr(buf);
+ case TYPE_NULL:
+ return js->WriteStr("null");
default:
- if (valp->IsTypeNum()) {
- char buf[32];
-
- return js->WriteStr(valp->GetCharString(buf));
- } // endif valp
-
- } // endswitch Type
+ return js->WriteStr("???"); // TODO
+ } // endswitch Type
- strcpy(js->g->Message, "Unrecognized value");
- return true;
+ strcpy(js->g->Message, "Unrecognized value");
+ return true;
} // end of SerializeValue
-/* -------------------------- Class JOUTSTR -------------------------- */
-
-/***********************************************************************/
-/* JOUTSTR constructor. */
-/***********************************************************************/
-JOUTSTR::JOUTSTR(PGLOBAL g) : JOUT(g)
-{
- PPOOLHEADER pph = (PPOOLHEADER)g->Sarea;
-
- N = 0;
- Max = pph->FreeBlk;
- Max = (Max > 32) ? Max - 32 : Max;
- Strp = (char*)PlugSubAlloc(g, NULL, 0); // Size not know yet
-} // end of JOUTSTR constructor
-
-/***********************************************************************/
-/* Concatenate a string to the Serialize string. */
-/***********************************************************************/
-bool JOUTSTR::WriteStr(const char *s)
-{
- if (s) {
- size_t len = strlen(s);
-
- if (N + len > Max)
- return true;
-
- memcpy(Strp + N, s, len);
- N += len;
- return false;
- } else
- return true;
-
-} // end of WriteStr
-
-/***********************************************************************/
-/* Concatenate a character to the Serialize string. */
-/***********************************************************************/
-bool JOUTSTR::WriteChr(const char c)
-{
- if (N + 1 > Max)
- return true;
-
- Strp[N++] = c;
- return false;
-} // end of WriteChr
-
-/***********************************************************************/
-/* Escape and Concatenate a string to the Serialize string. */
-/***********************************************************************/
-bool JOUTSTR::Escape(const char *s)
-{
- WriteChr('"');
-
- for (unsigned int i = 0; s[i]; i++)
- switch (s[i]) {
- case '"':
- case '\\':
- case '\t':
- case '\n':
- case '\r':
- case '\b':
- case '\f': WriteChr('\\');
- // fall through
- default:
- WriteChr(s[i]);
- break;
- } // endswitch s[i]
-
- WriteChr('"');
- return false;
-} // end of Escape
-
-/* ------------------------- Class JOUTFILE -------------------------- */
-
-/***********************************************************************/
-/* Write a string to the Serialize file. */
-/***********************************************************************/
-bool JOUTFILE::WriteStr(const char *s)
-{
- // This is temporary
- fputs(s, Stream);
- return false;
-} // end of WriteStr
-
-/***********************************************************************/
-/* Write a character to the Serialize file. */
-/***********************************************************************/
-bool JOUTFILE::WriteChr(const char c)
-{
- // This is temporary
- fputc(c, Stream);
- return false;
-} // end of WriteChr
-
-/***********************************************************************/
-/* Escape and Concatenate a string to the Serialize string. */
-/***********************************************************************/
-bool JOUTFILE::Escape(const char *s)
-{
- // This is temporary
- fputc('"', Stream);
-
- for (unsigned int i = 0; s[i]; i++)
- switch (s[i]) {
- case '"': fputs("\\\"", Stream); break;
- case '\\': fputs("\\\\", Stream); break;
- case '\t': fputs("\\t", Stream); break;
- case '\n': fputs("\\n", Stream); break;
- case '\r': fputs("\\r", Stream); break;
- case '\b': fputs("\\b", Stream); break;
- case '\f': fputs("\\f", Stream); break;
- default:
- fputc(s[i], Stream);
- break;
- } // endswitch s[i]
-
- fputc('"', Stream);
- return false;
-} // end of Escape
-
-/* ------------------------- Class JOUTPRT --------------------------- */
-
-/***********************************************************************/
-/* Write a string to the Serialize pretty file. */
-/***********************************************************************/
-bool JOUTPRT::WriteStr(const char *s)
-{
- // This is temporary
- if (B) {
- fputs(EL, Stream);
- M--;
-
- for (int i = 0; i < M; i++)
- fputc('\t', Stream);
-
- B = false;
- } // endif B
-
- fputs(s, Stream);
- return false;
-} // end of WriteStr
-
-/***********************************************************************/
-/* Write a character to the Serialize pretty file. */
-/***********************************************************************/
-bool JOUTPRT::WriteChr(const char c)
-{
- switch (c) {
- case ':':
- fputs(": ", Stream);
- break;
- case '{':
- case '[':
-#if 0
- if (M)
- fputs(EL, Stream);
-
- for (int i = 0; i < M; i++)
- fputc('\t', Stream);
-#endif // 0
-
- fputc(c, Stream);
- fputs(EL, Stream);
- M++;
-
- for (int i = 0; i < M; i++)
- fputc('\t', Stream);
-
- break;
- case '}':
- case ']':
- M--;
- fputs(EL, Stream);
-
- for (int i = 0; i < M; i++)
- fputc('\t', Stream);
-
- fputc(c, Stream);
- B = true;
- break;
- case ',':
- fputc(c, Stream);
- fputs(EL, Stream);
-
- for (int i = 0; i < M; i++)
- fputc('\t', Stream);
-
- B = false;
- break;
- default:
- fputc(c, Stream);
- } // endswitch c
-
-return false;
-} // end of WriteChr
-
/* -------------------------- Class JOBJECT -------------------------- */
/***********************************************************************/
/* Return the number of pairs in this object. */
/***********************************************************************/
-int JOBJECT::GetSize(bool b)
-{
- if (b) {
- // Return only non null pairs
- int n = 0;
+int JOBJECT::GetSize(bool b) {
+ int n = 0;
- for (PJPR jpp = First; jpp; jpp = jpp->Next)
- if (jpp->Val && !jpp->Val->IsNull())
- n++;
+ for (PJPR jpp = First; jpp; jpp = jpp->Next)
+ // If b return only non null pairs
+ if (!b || (jpp->Val && !jpp->Val->IsNull()))
+ n++;
- return n;
- } else
- return Size;
-
-} // end of GetSize
+ return n;
+} // end of GetSize
/***********************************************************************/
/* Add a new pair to an Object. */
/***********************************************************************/
PJPR JOBJECT::AddPair(PGLOBAL g, PCSZ key)
{
- PJPR jpp = new(g) JPAIR(key);
+ PJPR jpp = (PJPR)PlugSubAlloc(g, NULL, sizeof(JPAIR));
+
+ jpp->Key = key;
+ jpp->Next = NULL;
+ jpp->Val = NULL;
if (Last)
Last->Next = jpp;
@@ -973,7 +1024,6 @@ PJPR JOBJECT::AddPair(PGLOBAL g, PCSZ key)
First = jpp;
Last = jpp;
- Size++;
return jpp;
} // end of AddPair
@@ -982,13 +1032,13 @@ PJPR JOBJECT::AddPair(PGLOBAL g, PCSZ key)
/***********************************************************************/
PJAR JOBJECT::GetKeyList(PGLOBAL g)
{
- PJAR jarp = new(g) JARRAY();
+ PJAR jarp = new(g) JARRAY();
- for (PJPR jpp = First; jpp; jpp = jpp->Next)
- jarp->AddValue(g, new(g) JVALUE(g, jpp->GetKey()));
+ for (PJPR jpp = First; jpp; jpp = jpp->Next)
+ jarp->AddArrayValue(g, new(g) JVALUE(g, jpp->Key));
- jarp->InitArray(g);
- return jarp;
+ jarp->InitArray(g);
+ return jarp;
} // end of GetKeyList
/***********************************************************************/
@@ -996,19 +1046,19 @@ PJAR JOBJECT::GetKeyList(PGLOBAL g)
/***********************************************************************/
PJAR JOBJECT::GetValList(PGLOBAL g)
{
- PJAR jarp = new(g) JARRAY();
+ PJAR jarp = new(g) JARRAY();
- for (PJPR jpp = First; jpp; jpp = jpp->Next)
- jarp->AddValue(g, jpp->GetVal());
+ for (PJPR jpp = First; jpp; jpp = jpp->Next)
+ jarp->AddArrayValue(g, jpp->Val);
- jarp->InitArray(g);
- return jarp;
+ jarp->InitArray(g);
+ return jarp;
} // end of GetValList
/***********************************************************************/
/* Get the value corresponding to the given key. */
/***********************************************************************/
-PJVAL JOBJECT::GetValue(const char* key)
+PJVAL JOBJECT::GetKeyValue(const char* key)
{
for (PJPR jp = First; jp; jp = jp->Next)
if (!strcmp(jp->Key, key))
@@ -1020,43 +1070,57 @@ PJVAL JOBJECT::GetValue(const char* key)
/***********************************************************************/
/* Return the text corresponding to all keys (XML like). */
/***********************************************************************/
-PSZ JOBJECT::GetText(PGLOBAL g, PSZ text)
+PSZ JOBJECT::GetText(PGLOBAL g, PSTRG text)
{
- int n;
+ if (First) {
+ bool b;
- if (!text) {
- text = (char*)PlugSubAlloc(g, NULL, 0);
- text[0] = 0;
- n = 1;
- } else
- n = 0;
+ if (!text) {
+ text = new(g) STRING(g, 256);
+ b = true;
+ } else {
+ if (text->GetLastChar() != ' ')
+ text->Append(' ');
- if (!First && n)
- return NULL;
- else if (n == 1 && Size == 1 && !strcmp(First->GetKey(), "$date")) {
- int i;
+ b = false;
+ } // endif text
- First->Val->GetText(g, text);
- i = (text[1] == '-' ? 2 : 1);
+ if (b && !First->Next && !strcmp(First->Key, "$date")) {
+ int i;
+ PSZ s;
- if (IsNum(text + i)) {
- // Date is in milliseconds
- int j = (int)strlen(text);
+ First->Val->GetText(g, text);
+ s = text->GetStr();
+ i = (s[1] == '-' ? 2 : 1);
- if (j >= 4 + i)
- text[j - 3] = 0; // Change it to seconds
- else
- strcpy(text, " 0");
+ if (IsNum(s + i)) {
+ // Date is in milliseconds
+ int j = text->GetLength();
- } // endif text
+ if (j >= 4 + i) {
+ s[j - 3] = 0; // Change it to seconds
+ text->SetLength((uint)strlen(s));
+ } else
+ text->Set(" 0");
+
+ } // endif text
+
+ } else for (PJPR jp = First; jp; jp = jp->Next) {
+ jp->Val->GetText(g, text);
+
+ if (jp->Next)
+ text->Append(' ');
- } else for (PJPR jp = First; jp; jp = jp->Next)
- jp->Val->GetText(g, text);
+ } // endfor jp
- if (n)
- PlugSubAlloc(g, NULL, strlen(text) + 1);
+ if (b) {
+ text->Trim();
+ return text->GetStr();
+ } // endif b
- return text + n;
+ } // endif First
+
+ return NULL;
} // end of GetText;
/***********************************************************************/
@@ -1064,25 +1128,25 @@ PSZ JOBJECT::GetText(PGLOBAL g, PSZ text)
/***********************************************************************/
bool JOBJECT::Merge(PGLOBAL g, PJSON jsp)
{
- if (jsp->GetType() != TYPE_JOB) {
- strcpy(g->Message, "Second argument is not an object");
- return true;
- } // endif Type
+ if (jsp->GetType() != TYPE_JOB) {
+ strcpy(g->Message, "Second argument is not an object");
+ return true;
+ } // endif Type
- PJOB jobp = (PJOB)jsp;
+ PJOB jobp = (PJOB)jsp;
- for (PJPR jpp = jobp->First; jpp; jpp = jpp->Next)
- SetValue(g, jpp->GetVal(), jpp->GetKey());
+ for (PJPR jpp = jobp->First; jpp; jpp = jpp->Next)
+ SetKeyValue(g, jpp->Val, jpp->Key);
- return false;
+ return false;
} // end of Merge;
/***********************************************************************/
/* Set or add a value corresponding to the given key. */
/***********************************************************************/
-void JOBJECT::SetValue(PGLOBAL g, PJVAL jvp, PCSZ key)
+void JOBJECT::SetKeyValue(PGLOBAL g, PJVAL jvp, PCSZ key)
{
- PJPR jp;
+ PJPR jp;
for (jp = First; jp; jp = jp->Next)
if (!strcmp(jp->Key, key)) {
@@ -1102,15 +1166,14 @@ void JOBJECT::SetValue(PGLOBAL g, PJVAL jvp, PCSZ key)
/***********************************************************************/
void JOBJECT::DeleteKey(PCSZ key)
{
- PJPR jp, *pjp = &First;
+ PJPR jp, *pjp = &First;
- for (jp = First; jp; jp = jp->Next)
- if (!strcmp(jp->Key, key)) {
- *pjp = jp->Next;
- Size--;
- break;
- } else
- pjp = &jp->Next;
+ for (jp = First; jp; jp = jp->Next)
+ if (!strcmp(jp->Key, key)) {
+ *pjp = jp->Next;
+ break;
+ } else
+ pjp = &jp->Next;
} // end of DeleteKey
@@ -1129,23 +1192,35 @@ bool JOBJECT::IsNull(void)
/* -------------------------- Class JARRAY --------------------------- */
/***********************************************************************/
+/* JARRAY constructor. */
+/***********************************************************************/
+JARRAY::JARRAY(void) : JSON()
+{
+ Type = TYPE_JAR;
+ Size = 0;
+ Alloc = 0;
+ First = Last = NULL;
+ Mvals = NULL;
+} // end of JARRAY constructor
+
+/***********************************************************************/
/* Return the number of values in this object. */
/***********************************************************************/
int JARRAY::GetSize(bool b)
{
- if (b) {
- // Return only non null values
- int n = 0;
+ if (b) {
+ // Return only non null values
+ int n = 0;
- for (PJVAL jvp = First; jvp; jvp = jvp->Next)
- if (!jvp->IsNull())
- n++;
+ for (PJVAL jvp = First; jvp; jvp = jvp->Next)
+ if (!jvp->IsNull())
+ n++;
- return n;
- } else
- return Size;
+ return n;
+ } else
+ return Size;
-} // end of GetSize
+} // end of GetSize
/***********************************************************************/
/* Make the array of values from the values list. */
@@ -1166,19 +1241,19 @@ void JARRAY::InitArray(PGLOBAL g)
} // endif Size
for (i = 0, jvp = First; jvp; jvp = jvp->Next)
- if (!jvp->Del) {
- Mvals[i++] = jvp;
- pjvp = &jvp->Next;
- Last = jvp;
- } else
- *pjvp = jvp->Next;
+ if (!jvp->Del) {
+ Mvals[i++] = jvp;
+ pjvp = &jvp->Next;
+ Last = jvp;
+ } else
+ *pjvp = jvp->Next;
} // end of InitArray
/***********************************************************************/
/* Get the Nth value of an Array. */
/***********************************************************************/
-PJVAL JARRAY::GetValue(int i)
+PJVAL JARRAY::GetArrayValue(int i)
{
if (Mvals && i >= 0 && i < Size)
return Mvals[i];
@@ -1189,33 +1264,33 @@ PJVAL JARRAY::GetValue(int i)
/***********************************************************************/
/* Add a Value to the Array Value list. */
/***********************************************************************/
-PJVAL JARRAY::AddValue(PGLOBAL g, PJVAL jvp, int *x)
+PJVAL JARRAY::AddArrayValue(PGLOBAL g, PJVAL jvp, int *x)
{
if (!jvp)
jvp = new(g) JVALUE;
- if (x) {
- int i = 0, n = *x;
- PJVAL jp, *jpp = &First;
+ if (x) {
+ int i = 0, n = *x;
+ PJVAL jp, *jpp = &First;
- for (jp = First; jp && i < n; i++, jp = *(jpp = &jp->Next));
+ for (jp = First; jp && i < n; i++, jp = *(jpp = &jp->Next));
- (*jpp) = jvp;
+ (*jpp) = jvp;
- if (!(jvp->Next = jp))
- Last = jvp;
+ if (!(jvp->Next = jp))
+ Last = jvp;
- } else {
- if (!First)
- First = jvp;
- else if (Last == First)
- First->Next = Last = jvp;
- else
- Last->Next = jvp;
+ } else {
+ if (!First)
+ First = jvp;
+ else if (Last == First)
+ First->Next = Last = jvp;
+ else
+ Last->Next = jvp;
- Last = jvp;
- Last->Next = NULL;
- } // endif x
+ Last = jvp;
+ Last->Next = NULL;
+ } // endif x
return jvp;
} // end of AddValue
@@ -1225,24 +1300,24 @@ PJVAL JARRAY::AddValue(PGLOBAL g, PJVAL jvp, int *x)
/***********************************************************************/
bool JARRAY::Merge(PGLOBAL g, PJSON jsp)
{
- if (jsp->GetType() != TYPE_JAR) {
- strcpy(g->Message, "Second argument is not an array");
- return true;
- } // endif Type
+ if (jsp->GetType() != TYPE_JAR) {
+ strcpy(g->Message, "Second argument is not an array");
+ return true;
+ } // endif Type
- PJAR arp = (PJAR)jsp;
+ PJAR arp = (PJAR)jsp;
- for (int i = 0; i < jsp->size(); i++)
- AddValue(g, arp->GetValue(i));
+ for (int i = 0; i < arp->size(); i++)
+ AddArrayValue(g, arp->GetArrayValue(i));
- InitArray(g);
- return false;
+ InitArray(g);
+ return false;
} // end of Merge
/***********************************************************************/
/* Set the nth Value of the Array Value list. */
/***********************************************************************/
-bool JARRAY::SetValue(PGLOBAL g, PJVAL jvp, int n)
+bool JARRAY::SetArrayValue(PGLOBAL g, PJVAL jvp, int n)
{
int i = 0;
PJVAL jp, *jpp = &First;
@@ -1259,25 +1334,42 @@ bool JARRAY::SetValue(PGLOBAL g, PJVAL jvp, int n)
/***********************************************************************/
/* Return the text corresponding to all values. */
/***********************************************************************/
-PSZ JARRAY::GetText(PGLOBAL g, PSZ text)
+PSZ JARRAY::GetText(PGLOBAL g, PSTRG text)
{
- int n;
- PJVAL jp;
+ if (First) {
+ bool b;
+ PJVAL jp;
+
+ if (!text) {
+ text = new(g) STRING(g, 256);
+ b = true;
+ } else {
+ if (text->GetLastChar() != ' ')
+ text->Append(" (");
+ else
+ text->Append('(');
+
+ b = false;
+ }
+
+ for (jp = First; jp; jp = jp->Next) {
+ jp->GetText(g, text);
+
+ if (jp->Next)
+ text->Append(", ");
+ else if (!b)
+ text->Append(')');
- if (!text) {
- text = (char*)PlugSubAlloc(g, NULL, 0);
- text[0] = 0;
- n = 1;
- } else
- n = 0;
+ } // endfor jp
- for (jp = First; jp; jp = jp->Next)
- jp->GetText(g, text);
+ if (b) {
+ text->Trim();
+ return text->GetStr();
+ } // endif b
- if (n)
- PlugSubAlloc(g, NULL, strlen(text) + 1);
+ } // endif First
- return text + n;
+ return NULL;
} // end of GetText;
/***********************************************************************/
@@ -1285,13 +1377,13 @@ PSZ JARRAY::GetText(PGLOBAL g, PSZ text)
/***********************************************************************/
bool JARRAY::DeleteValue(int n)
{
- PJVAL jvp = GetValue(n);
+ PJVAL jvp = GetArrayValue(n);
- if (jvp) {
- jvp->Del = true;
- return false;
- } else
- return true;
+ if (jvp) {
+ jvp->Del = true;
+ return false;
+ } else
+ return true;
} // end of DeleteValue
@@ -1310,32 +1402,60 @@ bool JARRAY::IsNull(void)
/* -------------------------- Class JVALUE- -------------------------- */
/***********************************************************************/
-/* Constructor for a JSON. */
+/* Constructor for a JVALUE. */
/***********************************************************************/
JVALUE::JVALUE(PJSON jsp) : JSON()
{
- if (jsp->GetType() == TYPE_JVAL) {
- Jsp = jsp->GetJsp();
- Value = jsp->GetValue();
- } else {
- Jsp = jsp;
- Value = NULL;
- } // endif Type
+ if (jsp->GetType() == TYPE_JVAL) {
+ PJVAL jvp = (PJVAL)jsp;
+
+// Val = ((PJVAL)jsp)->GetVal();
+ if (jvp->DataType == TYPE_JSON) {
+ Jsp = jvp->GetJsp();
+ DataType = TYPE_JSON;
+ Nd = 0;
+ } else {
+ LLn = jvp->LLn; // Must be LLn on 32 bit machines
+ Nd = jvp->Nd;
+ DataType = jvp->DataType;
+ } // endelse Jsp
+
+ } else {
+ Jsp = jsp;
+// Val = NULL;
+ DataType = TYPE_JSON;
+ Nd = 0;
+ } // endif Type
- Next = NULL;
- Del = false;
- Size = 1;
-} // end of JVALUE constructor
+ Next = NULL;
+ Del = false;
+ Type = TYPE_JVAL;
+} // end of JVALUE constructor
+#if 0
/***********************************************************************/
-/* Constructor for a Value with a given string or numeric value. */
+/* Constructor for a JVALUE with a given string or numeric value. */
/***********************************************************************/
-JVALUE::JVALUE(PGLOBAL g, PVAL valp) : JSON()
+JVALUE::JVALUE(PGLOBAL g, PVL vlp) : JSON()
{
Jsp = NULL;
- Value = AllocateValue(g, valp);
+ Val = vlp;
Next = NULL;
Del = false;
+ Type = TYPE_JVAL;
+} // end of JVALUE constructor
+#endif // 0
+
+/***********************************************************************/
+/* Constructor for a JVALUE with a given string or numeric value. */
+/***********************************************************************/
+JVALUE::JVALUE(PGLOBAL g, PVAL valp) : JSON() {
+ Jsp = NULL;
+//Val = NULL;
+ SetValue(g, valp);
+ Next = NULL;
+ Del = false;
+ Type = TYPE_JVAL;
} // end of JVALUE constructor
/***********************************************************************/
@@ -1343,23 +1463,40 @@ JVALUE::JVALUE(PGLOBAL g, PVAL valp) : JSON()
/***********************************************************************/
JVALUE::JVALUE(PGLOBAL g, PCSZ strp) : JSON()
{
- Jsp = NULL;
- Value = AllocateValue(g, (void*)strp, TYPE_STRING);
- Next = NULL;
- Del = false;
+ Jsp = NULL;
+//Val = AllocVal(g, TYPE_STRG);
+ Strp = (char*)strp;
+ DataType = TYPE_STRG;
+ Nd = 0;
+ Next = NULL;
+ Del = false;
+ Type = TYPE_JVAL;
} // end of JVALUE constructor
/***********************************************************************/
+/* Set or reset all Jvalue members. */
+/***********************************************************************/
+void JVALUE::Clear(void)
+{
+ Jsp = NULL;
+ Next = NULL;
+ Type = TYPE_JVAL;
+ Del = false;
+ Nd = 0;
+ DataType = TYPE_NULL;
+} // end of Clear
+
+/***********************************************************************/
/* Returns the type of the Value's value. */
/***********************************************************************/
JTYP JVALUE::GetValType(void)
{
- if (Jsp)
+ if (DataType == TYPE_JSON)
return Jsp->GetType();
- else if (Value)
- return (JTYP)Value->GetType();
+//else if (Val)
+// return Val->Type;
else
- return TYPE_NULL;
+ return DataType;
} // end of GetValType
@@ -1368,7 +1505,7 @@ JTYP JVALUE::GetValType(void)
/***********************************************************************/
PJOB JVALUE::GetObject(void)
{
- if (Jsp && Jsp->GetType() == TYPE_JOB)
+ if (DataType == TYPE_JSON && Jsp->GetType() == TYPE_JOB)
return (PJOB)Jsp;
return NULL;
@@ -1379,18 +1516,46 @@ PJOB JVALUE::GetObject(void)
/***********************************************************************/
PJAR JVALUE::GetArray(void)
{
- if (Jsp && Jsp->GetType() == TYPE_JAR)
+ if (DataType == TYPE_JSON && Jsp->GetType() == TYPE_JAR)
return (PJAR)Jsp;
return NULL;
} // end of GetArray
/***********************************************************************/
-/* Return the Value's Integer value. */
+/* Return the Value's as a Value class. */
/***********************************************************************/
-int JVALUE::GetInteger(void)
+PVAL JVALUE::GetValue(PGLOBAL g)
{
- return (Value) ? Value->GetIntValue() : 0;
+ PVAL valp = NULL;
+
+ if (DataType != TYPE_JSON)
+ if (DataType == TYPE_STRG)
+ valp = AllocateValue(g, Strp, DataType, Nd);
+ else
+ valp = AllocateValue(g, &LLn, DataType, Nd);
+
+ return valp;
+} // end of GetValue
+
+/***********************************************************************/
+/* Return the Value's Integer value. */
+/***********************************************************************/
+int JVALUE::GetInteger(void) {
+ int n;
+
+ switch (DataType) {
+ case TYPE_INTG: n = N; break;
+ case TYPE_DBL: n = (int)F; break;
+ case TYPE_DTM:
+ case TYPE_STRG: n = atoi(Strp); break;
+ case TYPE_BOOL: n = (B) ? 1 : 0; break;
+ case TYPE_BINT: n = (int)LLn; break;
+ default:
+ n = 0;
+ } // endswitch Type
+
+ return n;
} // end of GetInteger
/***********************************************************************/
@@ -1398,7 +1563,20 @@ int JVALUE::GetInteger(void)
/***********************************************************************/
long long JVALUE::GetBigint(void)
{
- return (Value) ? Value->GetBigintValue() : 0;
+ long long lln;
+
+ switch (DataType) {
+ case TYPE_BINT: lln = LLn; break;
+ case TYPE_INTG: lln = (long long)N; break;
+ case TYPE_DBL: lln = (long long)F; break;
+ case TYPE_DTM:
+ case TYPE_STRG: lln = atoll(Strp); break;
+ case TYPE_BOOL: lln = (B) ? 1 : 0; break;
+ default:
+ lln = 0;
+ } // endswitch Type
+
+ return lln;
} // end of GetBigint
/***********************************************************************/
@@ -1406,75 +1584,157 @@ long long JVALUE::GetBigint(void)
/***********************************************************************/
double JVALUE::GetFloat(void)
{
- return (Value) ? Value->GetFloatValue() : 0.0;
+ double d;
+
+ switch (DataType) {
+ case TYPE_DBL: d = F; break;
+ case TYPE_BINT: d = (double)LLn; break;
+ case TYPE_INTG: d = (double)N; break;
+ case TYPE_DTM:
+ case TYPE_STRG: d = atof(Strp); break;
+ case TYPE_BOOL: d = (B) ? 1.0 : 0.0; break;
+ default:
+ d = 0.0;
+ } // endswitch Type
+
+ return d;
} // end of GetFloat
/***********************************************************************/
/* Return the Value's String value. */
/***********************************************************************/
-PSZ JVALUE::GetString(PGLOBAL g)
+PSZ JVALUE::GetString(PGLOBAL g, char *buff)
{
- char *p;
-
- if (Value) {
- char buf[32];
-
- if ((p = Value->GetCharString(buf)) == buf)
- p = PlugDup(g, buf);
-
- } else
- p = NULL;
-
- return p;
+ char buf[32];
+ char *p = (buff) ? buff : buf;
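+ // Numeric values are printed into buff when supplied, else into the local buf and duplicated on return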
+
+ switch (DataType) {
+ case TYPE_DTM:
+ case TYPE_STRG:
+ p = Strp;
+ break;
+ case TYPE_INTG:
+ sprintf(p, "%d", N);
+ break;
+ case TYPE_BINT:
+ sprintf(p, "%lld", LLn);
+ break;
+ case TYPE_DBL:
+ sprintf(p, "%.*lf", Nd, F);
+ break;
+ case TYPE_BOOL:
+ p = (char*)((B) ? "true" : "false");
+ break;
+ case TYPE_NULL:
+ p = (char*)"null";
+ break;
+ default:
+ p = NULL;
+ } // endswitch Type
+
+
+ return (p == buf) ? (char*)PlugDup(g, buf) : p;
} // end of GetString
/***********************************************************************/
/* Return the Value's String value. */
/***********************************************************************/
-PSZ JVALUE::GetText(PGLOBAL g, PSZ text)
+PSZ JVALUE::GetText(PGLOBAL g, PSTRG text)
{
- if (Jsp)
+ if (DataType == TYPE_JSON)
return Jsp->GetText(g, text);
- char buf[32];
- PSZ s = (Value) ? Value->GetCharString(buf) : NULL;
+ char buff[32];
+ PSZ s = (DataType == TYPE_NULL) ? NULL : GetString(g, buff);
- if (s)
- strcat(strcat(text, " "), s);
- else if (GetJsonNull())
- strcat(strcat(text, " "), GetJsonNull());
+ if (s)
+ text->Append(s);
+ else if (GetJsonNull())
+ text->Append(GetJsonNull());
- return text;
+ return NULL;
} // end of GetText
void JVALUE::SetValue(PJSON jsp)
{
- if (jsp && jsp->GetType() == TYPE_JVAL) {
- Jsp = jsp->GetJsp();
- Value = jsp->GetValue();
- } else {
- Jsp = jsp;
- Value = NULL;
- } // endif Type
+ if (DataType == TYPE_JSON && jsp->GetType() == TYPE_JVAL) {
+ Jsp = jsp->GetJsp();
+ Nd = ((PJVAL)jsp)->Nd;
+ DataType = ((PJVAL)jsp)->DataType;
+ // Val = ((PJVAL)jsp)->GetVal();
+ } else {
+ Jsp = jsp;
+ DataType = TYPE_JSON;
+ } // endif Type
+
+} // end of SetValue;
+
+void JVALUE::SetValue(PGLOBAL g, PVAL valp)
+{
+//if (!Val)
+// Val = AllocVal(g, TYPE_VAL);
+
+ if (!valp || valp->IsNull()) {
+ DataType = TYPE_NULL;
+ } else switch (valp->GetType()) {
+ case TYPE_DATE:
+ if (((DTVAL*)valp)->IsFormatted())
+ Strp = PlugDup(g, valp->GetCharValue());
+ else {
+ char buf[32];
+
+ Strp = PlugDup(g, valp->GetCharString(buf));
+ } // endif Formatted
+
+ DataType = TYPE_DTM;
+ break;
+ case TYPE_STRING:
+ Strp = PlugDup(g, valp->GetCharValue());
+ DataType = TYPE_STRG;
+ break;
+ case TYPE_DOUBLE:
+ case TYPE_DECIM:
+ F = valp->GetFloatValue();
+
+ if (IsTypeNum(valp->GetType()))
+ Nd = valp->GetValPrec();
+
+ DataType = TYPE_DBL;
+ break;
+ case TYPE_TINY:
+ B = valp->GetTinyValue() != 0;
+    DataType = TYPE_BOOL;
+    break;
+ case TYPE_INT:
+ N = valp->GetIntValue();
+ DataType = TYPE_INTG;
+ break;
+ case TYPE_BIGINT:
+ LLn = valp->GetBigintValue();
+ DataType = TYPE_BINT;
+ break;
+ default:
+    sprintf(g->Message, "Unsupported type %d\n", valp->GetType());
+ throw(777);
+ } // endswitch Type
-} // end of SetValue;
+} // end of SetValue
/***********************************************************************/
/* Set the Value's value as the given integer. */
/***********************************************************************/
void JVALUE::SetInteger(PGLOBAL g, int n)
{
- Value = AllocateValue(g, &n, TYPE_INT);
- Jsp = NULL;
+ N = n;
+ DataType = TYPE_INTG;
} // end of SetInteger
/***********************************************************************/
/* Set the Value's Boolean value as a tiny integer. */
/***********************************************************************/
-void JVALUE::SetTiny(PGLOBAL g, char n)
+void JVALUE::SetBool(PGLOBAL g, bool b)
{
- Value = AllocateValue(g, &n, TYPE_TINY);
- Jsp = NULL;
+ B = b;
+ DataType = TYPE_BOOL;
} // end of SetTiny
/***********************************************************************/
@@ -1482,8 +1742,8 @@ void JVALUE::SetTiny(PGLOBAL g, char n)
/***********************************************************************/
void JVALUE::SetBigint(PGLOBAL g, long long ll)
{
- Value = AllocateValue(g, &ll, TYPE_BIGINT);
- Jsp = NULL;
+ LLn = ll;
+ DataType = TYPE_BINT;
} // end of SetBigint
/***********************************************************************/
@@ -1491,17 +1751,19 @@ void JVALUE::SetBigint(PGLOBAL g, long long ll)
/***********************************************************************/
void JVALUE::SetFloat(PGLOBAL g, double f)
{
- Value = AllocateValue(g, &f, TYPE_DOUBLE, 6);
- Jsp = NULL;
+ F = f;
+ Nd = 6;
+ DataType = TYPE_DBL;
} // end of SetFloat
/***********************************************************************/
/* Set the Value's value as the given string. */
/***********************************************************************/
-void JVALUE::SetString(PGLOBAL g, PSZ s, short c)
+void JVALUE::SetString(PGLOBAL g, PSZ s, int ci)
{
- Value = AllocateValue(g, s, TYPE_STRING, c);
- Jsp = NULL;
+ Strp = s;
+ Nd = ci;
+ DataType = TYPE_STRG;
} // end of SetString
/***********************************************************************/
@@ -1509,6 +1771,239 @@ void JVALUE::SetString(PGLOBAL g, PSZ s, short c)
/***********************************************************************/
bool JVALUE::IsNull(void)
{
- return (Jsp) ? Jsp->IsNull() : (Value) ? Value->IsNull() : true;
+ return (DataType == TYPE_JSON) ? Jsp->IsNull() : DataType == TYPE_NULL;
} // end of IsNull
+
+/* ---------------------------- Class SWAP --------------------------- */
+
+/***********************************************************************/
+/* Replace all pointers by offsets or the opposite. */
+/***********************************************************************/
+void SWAP::SwapJson(PJSON jsp, bool move)
+{
+ if (move)
+ MoffJson(jsp);
+ else
+ MptrJson((PJSON)MakeOff(Base, jsp));
+
+ return;
+} // end of SwapJson
+
+/***********************************************************************/
+/* Replace all pointers by offsets. */
+/***********************************************************************/
+size_t SWAP::MoffJson(PJSON jsp) {
+ size_t res = 0;
+
+ if (jsp)
+ switch (jsp->Type) {
+ case TYPE_JAR:
+ res = MoffArray((PJAR)jsp);
+ break;
+ case TYPE_JOB:
+ res = MoffObject((PJOB)jsp);
+ break;
+ case TYPE_JVAL:
+ res = MoffJValue((PJVAL)jsp);
+ break;
+ default:
+ throw "Invalid json tree";
+ } // endswitch Type
+
+ return res;
+} // end of MoffJson
+
+/***********************************************************************/
+/* Replace all array pointers by offsets. */
+/***********************************************************************/
+size_t SWAP::MoffArray(PJAR jarp)
+{
+ if (jarp->First) {
+ for (int i = 0; i < jarp->Size; i++)
+ jarp->Mvals[i] = (PJVAL)MakeOff(Base, jarp->Mvals[i]);
+
+ jarp->Mvals = (PJVAL*)MakeOff(Base, jarp->Mvals);
+ jarp->First = (PJVAL)MoffJValue(jarp->First);
+ jarp->Last = (PJVAL)MakeOff(Base, jarp->Last);
+ } // endif First
+
+ return MakeOff(Base, jarp);
+} // end of MoffArray
+
+/***********************************************************************/
+/* Replace all object pointers by offsets. */
+/***********************************************************************/
+size_t SWAP::MoffObject(PJOB jobp) {
+ if (jobp->First) {
+ jobp->First = (PJPR)MoffPair(jobp->First);
+ jobp->Last = (PJPR)MakeOff(Base, jobp->Last);
+ } // endif First
+
+ return MakeOff(Base, jobp);
+} // end of MoffObject
+
+/***********************************************************************/
+/* Replace all pair pointers by offsets. */
+/***********************************************************************/
+size_t SWAP::MoffPair(PJPR jpp) {
+ jpp->Key = (PCSZ)MakeOff(Base, (void*)jpp->Key);
+
+ if (jpp->Val)
+ jpp->Val = (PJVAL)MoffJValue(jpp->Val);
+
+ if (jpp->Next)
+ jpp->Next = (PJPR)MoffPair(jpp->Next);
+
+ return MakeOff(Base, jpp);
+} // end of MoffPair
+
+/***********************************************************************/
+/* Replace all json value pointers by offsets.                        */
+/***********************************************************************/
+size_t SWAP::MoffJValue(PJVAL jvp) {
+ if (!jvp->Del) {
+ if (jvp->DataType == TYPE_JSON)
+ jvp->Jsp = (PJSON)MoffJson(jvp->Jsp);
+ else if (jvp->DataType == TYPE_STRG)
+ jvp->Strp = (PSZ)MakeOff(Base, (jvp->Strp));
+
+// if (jvp->Val)
+// jvp->Val = (PVL)MoffVal(jvp->Val);
+
+ } // endif Del
+
+ if (jvp->Next)
+ jvp->Next = (PJVAL)MoffJValue(jvp->Next);
+
+ return MakeOff(Base, jvp);
+} // end of MoffJValue
+
+#if 0
+/***********************************************************************/
+/* Replace string pointers by offset. */
+/***********************************************************************/
+size_t SWAP::MoffVal(PVL vlp) {
+ if (vlp->Type == TYPE_STRG)
+ vlp->Strp = (PSZ)MakeOff(Base, (vlp->Strp));
+
+ return MakeOff(Base, vlp);
+} // end of MoffVal
+#endif // 0
+
+/***********************************************************************/
+/* Replace all offsets by pointers. */
+/***********************************************************************/
+PJSON SWAP::MptrJson(PJSON ojp) { // ojp is an offset
+ PJSON jsp = (PJSON)MakePtr(Base, (size_t)ojp);
+
+ if (ojp)
+ switch (jsp->Type) {
+ case TYPE_JAR:
+ jsp = MptrArray((PJAR)ojp);
+ break;
+ case TYPE_JOB:
+ jsp = MptrObject((PJOB)ojp);
+ break;
+ case TYPE_JVAL:
+ jsp = MptrJValue((PJVAL)ojp);
+ break;
+ default:
+ throw "Invalid json tree";
+ } // endswitch Type
+
+ return jsp;
+} // end of MptrJson
+
+/***********************************************************************/
+/* Replace all array offsets by pointers. */
+/***********************************************************************/
+PJAR SWAP::MptrArray(PJAR ojar) {
+ PJAR jarp = (PJAR)MakePtr(Base, (size_t)ojar);
+
+ jarp = (PJAR)new((long long)jarp) JARRAY(0);
+
+ if (jarp->First) {
+ jarp->Mvals = (PJVAL*)MakePtr(Base, (size_t)jarp->Mvals);
+
+ for (int i = 0; i < jarp->Size; i++)
+ jarp->Mvals[i] = (PJVAL)MakePtr(Base, (size_t)jarp->Mvals[i]);
+
+ jarp->First = (PJVAL)MptrJValue(jarp->First);
+ jarp->Last = (PJVAL)MakePtr(Base, (size_t)jarp->Last);
+ } // endif First
+
+ return jarp;
+} // end of MptrArray
+
+/***********************************************************************/
+/* Replace all object offsets by pointers. */
+/***********************************************************************/
+PJOB SWAP::MptrObject(PJOB ojob) {
+ PJOB jobp = (PJOB)MakePtr(Base, (size_t)ojob);
+
+ jobp = (PJOB)new((long long)jobp) JOBJECT(0);
+
+ if (jobp->First) {
+ jobp->First = (PJPR)MptrPair(jobp->First);
+ jobp->Last = (PJPR)MakePtr(Base, (size_t)jobp->Last);
+ } // endif First
+
+ return jobp;
+} // end of MptrObject
+
+/***********************************************************************/
+/* Replace all pair offsets by pointers. */
+/***********************************************************************/
+PJPR SWAP::MptrPair(PJPR ojp) {
+ PJPR jpp = (PJPR)MakePtr(Base, (size_t)ojp);
+
+ jpp->Key = (PCSZ)MakePtr(Base, (size_t)jpp->Key);
+
+ if (jpp->Val)
+ jpp->Val = (PJVAL)MptrJValue(jpp->Val);
+
+ if (jpp->Next)
+ jpp->Next = (PJPR)MptrPair(jpp->Next);
+
+ return jpp;
+} // end of MptrPair
+
+/***********************************************************************/
+/* Replace all value offsets by pointers. */
+/***********************************************************************/
+PJVAL SWAP::MptrJValue(PJVAL ojv) {
+ PJVAL jvp = (PJVAL)MakePtr(Base, (size_t)ojv);
+
+ jvp = (PJVAL)new((long long)jvp) JVALUE(0);
+
+ if (!jvp->Del) {
+ if (jvp->DataType == TYPE_JSON)
+ jvp->Jsp = (PJSON)MptrJson(jvp->Jsp);
+ else if (jvp->DataType == TYPE_STRG)
+ jvp->Strp = (PSZ)MakePtr(Base, (size_t)jvp->Strp);
+
+// if (jvp->Val)
+// jvp->Val = (PVL)MptrVal(jvp->Val);
+
+ } // endif Del
+
+ if (jvp->Next)
+ jvp->Next = (PJVAL)MptrJValue(jvp->Next);
+
+ return jvp;
+} // end of MptrJValue
+
+#if 0
+/***********************************************************************/
+/* Replace string offsets by a pointer. */
+/***********************************************************************/
+PVL SWAP::MptrVal(PVL ovl) {
+ PVL vlp = (PVL)MakePtr(Base, (size_t)ovl);
+
+ if (vlp->Type == TYPE_STRG)
+ vlp->Strp = (PSZ)MakePtr(Base, (size_t)vlp->Strp);
+
+ return vlp;
+} // end of MptrValue
+#endif // 0
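Note on the SWAP code added above: it makes a sub-allocated JSON tree relocatable by rewriting every intra-tree pointer as an offset from the area base (the MoffXxx methods) and resolving offsets back into pointers once the block has been moved (the MptrXxx methods). The following is a minimal standalone sketch of that relocation idea only; MakeOff/MakePtr are simplified stand-ins for the plugin's own helpers, and the Node struct and main() are illustrative assumptions, not CONNECT code.

#include <cstddef>
#include <cstdio>
#include <cstring>

// Simplified stand-ins for the pointer <-> offset helpers.
static size_t MakeOff(void *base, void *ptr) {
  return ptr ? (size_t)((char *)ptr - (char *)base) : 0;
}
static void *MakePtr(void *base, size_t off) {
  return off ? (char *)base + off : nullptr;
}

struct Node { size_t next; char name[8]; };   // "next" is stored as an offset

int main() {
  Node area[2];                               // the movable memory area
  Node *a = &area[0], *b = &area[1];
  strcpy(a->name, "first");
  strcpy(b->name, "second");
  a->next = MakeOff(area, b);                 // swap pointer -> offset
  b->next = 0;

  Node moved[2];                              // relocate the whole block
  memcpy(moved, area, sizeof(area));

  Node *a2 = &moved[0];
  Node *b2 = (Node *)MakePtr(moved, a2->next);// swap offset -> pointer at new base
  printf("%s -> %s\n", a2->name, b2->name);   // prints: first -> second
  return 0;
}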
diff --git a/storage/connect/json.h b/storage/connect/json.h
index bc94b372133..3a026f5df22 100644
--- a/storage/connect/json.h
+++ b/storage/connect/json.h
@@ -5,8 +5,10 @@
/* */
/* This file contains the JSON classes declares. */
/***********************************************************************/
+#pragma once
#include <mysql_com.h>
#include "value.h"
+#include "xobject.h"
#if defined(_DEBUG)
#define X assert(false);
@@ -14,240 +16,147 @@
#define X
#endif
-enum JTYP {TYPE_NULL = TYPE_VOID,
- TYPE_STRG = TYPE_STRING,
- TYPE_DBL = TYPE_DOUBLE,
- TYPE_BOOL = TYPE_TINY,
- TYPE_BINT = TYPE_BIGINT,
- TYPE_DTM = TYPE_DATE,
- TYPE_INTG = TYPE_INT,
- TYPE_VAL = 12,
- TYPE_JSON,
- TYPE_JAR,
- TYPE_JOB,
- TYPE_JVAL};
-
+enum JTYP {
+ TYPE_NULL = TYPE_VOID,
+ TYPE_STRG = TYPE_STRING,
+ TYPE_DBL = TYPE_DOUBLE,
+ TYPE_BOOL = TYPE_TINY,
+ TYPE_BINT = TYPE_BIGINT,
+ TYPE_INTG = TYPE_INT,
+ TYPE_DTM = TYPE_DATE,
+ TYPE_FLOAT,
+ TYPE_JAR,
+ TYPE_JOB,
+ TYPE_JVAL,
+ TYPE_JSON,
+ TYPE_DEL,
+ TYPE_UNKNOWN
+};
+
+class JDOC;
class JOUT;
class JSON;
-class JMAP;
class JVALUE;
class JOBJECT;
class JARRAY;
-typedef class JPAIR *PJPR;
+typedef class JDOC *PJDOC;
typedef class JSON *PJSON;
typedef class JVALUE *PJVAL;
typedef class JOBJECT *PJOB;
typedef class JARRAY *PJAR;
-typedef struct {
- char *str;
- int len;
- } STRG, *PSG;
-
-// BSON size should be equal on Linux and Windows
-#define BMX 255
-typedef struct BSON* PBSON;
+typedef struct JPAIR *PJPR;
+//typedef struct VAL *PVL;
/***********************************************************************/
-/* Structure used to return binary json to Json UDF functions. */
+/* Structure JPAIR. The pairs of a json Object. */
/***********************************************************************/
-struct BSON {
- char Msg[BMX + 1];
- char *Filename;
- PGLOBAL G;
- int Pretty;
- ulong Reslen;
- my_bool Changed;
- PJSON Top;
- PJSON Jsp;
- PBSON Bsp;
-}; // end of struct BSON
-
-PBSON JbinAlloc(PGLOBAL g, UDF_ARGS* args, ulong len, PJSON jsp);
+struct JPAIR {
+ PCSZ Key; // This pair key name
+ PJVAL Val; // To the value of the pair
+ PJPR Next; // To the next pair
+}; // end of struct JPAIR
+//PVL AllocVal(PGLOBAL g, JTYP type);
char *NextChr(PSZ s, char sep);
char *GetJsonNull(void);
+const char* GetFmt(int type, bool un);
-PJSON ParseJson(PGLOBAL g, char* s, int n, int* prty = NULL, bool* b = NULL);
+PJSON ParseJson(PGLOBAL g, char* s, size_t n, int* prty = NULL, bool* b = NULL);
PSZ Serialize(PGLOBAL g, PJSON jsp, char *fn, int pretty);
-bool SerializeArray(JOUT *js, PJAR jarp, bool b);
-bool SerializeObject(JOUT *js, PJOB jobp);
-bool SerializeValue(JOUT *js, PJVAL jvp);
-char *NextChr(PSZ s, char sep);
DllExport bool IsNum(PSZ s);
/***********************************************************************/
-/* Class JOUT. Used by Serialize. */
-/***********************************************************************/
-class JOUT : public BLOCK {
- public:
- JOUT(PGLOBAL gp) : BLOCK() {g = gp; Pretty = 3;}
-
- virtual bool WriteStr(const char *s) = 0;
- virtual bool WriteChr(const char c) = 0;
- virtual bool Escape(const char *s) = 0;
- int Prty(void) {return Pretty;}
-
- // Member
- PGLOBAL g;
- int Pretty;
-}; // end of class JOUT
-
-/***********************************************************************/
-/* Class JOUTSTR. Used to Serialize to a string. */
-/***********************************************************************/
-class JOUTSTR : public JOUT {
- public:
- JOUTSTR(PGLOBAL g);
-
- virtual bool WriteStr(const char *s);
- virtual bool WriteChr(const char c);
- virtual bool Escape(const char *s);
-
- // Member
- char *Strp; // The serialized string
- size_t N; // Position of next char
- size_t Max; // String max size
-}; // end of class JOUTSTR
-
-/***********************************************************************/
-/* Class JOUTFILE. Used to Serialize to a file. */
-/***********************************************************************/
-class JOUTFILE : public JOUT {
- public:
- JOUTFILE(PGLOBAL g, FILE *str, int pty) : JOUT(g) {Stream = str; Pretty = pty;}
-
- virtual bool WriteStr(const char *s);
- virtual bool WriteChr(const char c);
- virtual bool Escape(const char *s);
-
- // Member
- FILE *Stream;
-}; // end of class JOUTFILE
-
-/***********************************************************************/
-/* Class JOUTPRT. Used to Serialize to a pretty file. */
-/***********************************************************************/
-class JOUTPRT : public JOUTFILE {
- public:
- JOUTPRT(PGLOBAL g, FILE *str) : JOUTFILE(g, str, 2) {M = 0; B = false;}
-
- virtual bool WriteStr(const char *s);
- virtual bool WriteChr(const char c);
-
- // Member
- int M;
- bool B;
-}; // end of class JOUTPRT
-
-/***********************************************************************/
-/* Class PAIR. The pairs of a json Object. */
+/* Class JDOC. The class for parsing and serializing json documents. */
/***********************************************************************/
-class JPAIR : public BLOCK {
- friend class JOBJECT;
- friend class JSNX;
- friend class JSON;
- friend bool SerializeObject(JOUT *, PJOB);
- public:
- JPAIR(PCSZ key) : BLOCK() {Key = key; Val = NULL; Next = NULL;}
-
- inline PCSZ GetKey(void) {return Key;}
- inline PJVAL GetVal(void) {return Val;}
- inline PJPR GetNext(void) {return Next;}
+class JDOC: public BLOCK {
+ friend PJSON ParseJson(PGLOBAL, char*, size_t, int*, bool*);
+ friend PSZ Serialize(PGLOBAL, PJSON, char*, int);
+public:
+ JDOC(void) : js(NULL), s(NULL), len(0), pty(NULL) {}
- protected:
- PCSZ Key; // This pair key name
- PJVAL Val; // To the value of the pair
- PJPR Next; // To the next pair
-}; // end of class JPAIR
-
-/***********************************************************************/
-/* Class JSON. The base class for all other json classes. */
-/***********************************************************************/
-class JSON : public BLOCK {
- friend PJSON ParseJson(PGLOBAL, char*, int, int*, bool*);
- public:
- JSON(void) : s(NULL), len(0), pty(NULL) {Size = 0;}
-
- int size(void) {return Size;}
- virtual int GetSize(bool b) {return Size;}
- virtual void Clear(void) {Size = 0;}
- virtual JTYP GetType(void) {return TYPE_JSON;}
- virtual JTYP GetValType(void) {X return TYPE_JSON;}
- virtual void InitArray(PGLOBAL g) {X}
-//virtual PJVAL AddValue(PGLOBAL g, PJVAL jvp = NULL, int *x = NULL) {X return NULL;}
- virtual PJPR AddPair(PGLOBAL g, PCSZ key) {X return NULL;}
- virtual PJAR GetKeyList(PGLOBAL g) {X return NULL;}
- virtual PJAR GetValList(PGLOBAL g) {X return NULL;}
- virtual PJVAL GetValue(const char *key) {X return NULL;}
- virtual PJOB GetObject(void) {return NULL;}
- virtual PJAR GetArray(void) {return NULL;}
- virtual PJVAL GetValue(int i) {X return NULL;}
- virtual PVAL GetValue(void) {X return NULL;}
- virtual PJSON GetJsp(void) { X return NULL; }
- virtual PJSON GetJson(void) { X return NULL; }
- virtual PJPR GetFirst(void) {X return NULL;}
- virtual int GetInteger(void) {X return 0;}
- virtual double GetFloat() {X return 0.0;}
- virtual PSZ GetString(PGLOBAL g) {X return NULL;}
- virtual PSZ GetText(PGLOBAL g, PSZ text) {X return NULL;}
- virtual bool Merge(PGLOBAL g, PJSON jsp) { X return true; }
- virtual bool SetValue(PGLOBAL g, PJVAL jvp, int i) { X return true; }
- virtual void SetValue(PGLOBAL g, PJVAL jvp, PCSZ key) {X}
- virtual void SetValue(PVAL valp) {X}
- virtual void SetValue(PJSON jsp) {X}
- virtual void SetString(PGLOBAL g, PSZ s, short c) {X}
- virtual void SetInteger(PGLOBAL g, int n) {X}
- virtual void SetFloat(PGLOBAL g, double f) {X}
- virtual void DeleteKey(PCSZ k) {X}
- virtual bool DeleteValue(int i) {X return true;}
- virtual bool IsNull(void) {X return true;}
+ void SetJp(JOUT* jp) { js = jp; }
protected:
PJAR ParseArray(PGLOBAL g, int& i);
PJOB ParseObject(PGLOBAL g, int& i);
PJVAL ParseValue(PGLOBAL g, int& i);
char *ParseString(PGLOBAL g, int& i);
- PVAL ParseNumeric(PGLOBAL g, int& i);
+ void ParseNumeric(PGLOBAL g, int& i, PJVAL jvp);
PJAR ParseAsArray(PGLOBAL g, int& i, int pretty, int *ptyp);
+ bool SerializeArray(PJAR jarp, bool b);
+ bool SerializeObject(PJOB jobp);
+ bool SerializeValue(PJVAL jvp);
- // Members
- int Size;
-
- // Only used when parsing
+ // Members used when parsing and serializing
private:
+ JOUT* js;
char *s;
int len;
bool *pty;
+}; // end of class JDOC
+
+/***********************************************************************/
+/* Class JSON. The base class for all other json classes. */
+/***********************************************************************/
+class JSON : public BLOCK {
+public:
+ // Constructor
+ JSON(void) { Type = TYPE_JSON; }
+ JSON(int) {}
+
+ // Implementation
+ inline JTYP GetType(void) { return Type; }
+
+ // Methods
+ virtual int size(void) { return 1; }
+ virtual void Clear(void) { X }
+ virtual PJOB GetObject(void) { return NULL; }
+ virtual PJAR GetArray(void) { return NULL; }
+ virtual PJVAL GetArrayValue(int i) { X return NULL; }
+ virtual int GetSize(bool b) { X return 0; }
+ virtual PJSON GetJsp(void) { X return NULL; }
+ virtual PJPR GetFirst(void) { X return NULL; }
+ virtual PSZ GetText(PGLOBAL g, PSTRG text) { X return NULL; }
+ virtual bool Merge(PGLOBAL g, PJSON jsp) { X return true; }
+ virtual void SetValue(PJSON jsp) { X }
+ virtual bool DeleteValue(int i) { X return true; }
+ virtual bool IsNull(void) { X return true; }
+
+ // Members
+ JTYP Type;
}; // end of class JSON
/***********************************************************************/
/* Class JOBJECT: contains a list of value pairs. */
/***********************************************************************/
class JOBJECT : public JSON {
- friend bool SerializeObject(JOUT *, PJOB);
+ friend class JDOC;
friend class JSNX;
- public:
- JOBJECT(void) : JSON() {First = Last = NULL;}
-
- using JSON::GetValue;
- using JSON::SetValue;
- virtual void Clear(void) {First = Last = NULL; Size = 0;}
- virtual JTYP GetType(void) {return TYPE_JOB;}
+ friend class SWAP;
+public:
+ JOBJECT(void) : JSON() { Type = TYPE_JOB; First = Last = NULL; }
+ JOBJECT(int i) : JSON(i) {}
+
+ // Methods
+ virtual void Clear(void) {First = Last = NULL;}
+//virtual JTYP GetValType(void) {return TYPE_JOB;}
virtual PJPR GetFirst(void) {return First;}
virtual int GetSize(bool b);
- virtual PJPR AddPair(PGLOBAL g, PCSZ key);
virtual PJOB GetObject(void) {return this;}
- virtual PJVAL GetValue(const char* key);
- virtual PJAR GetKeyList(PGLOBAL g);
- virtual PJAR GetValList(PGLOBAL g);
- virtual PSZ GetText(PGLOBAL g, PSZ text);
+ virtual PSZ GetText(PGLOBAL g, PSTRG text);
virtual bool Merge(PGLOBAL g, PJSON jsp);
- virtual void SetValue(PGLOBAL g, PJVAL jvp, PCSZ key);
- virtual void DeleteKey(PCSZ k);
virtual bool IsNull(void);
+ // Specific
+ PJPR AddPair(PGLOBAL g, PCSZ key);
+ PJVAL GetKeyValue(const char* key);
+ PJAR GetKeyList(PGLOBAL g);
+ PJAR GetValList(PGLOBAL g);
+ void SetKeyValue(PGLOBAL g, PJVAL jvp, PCSZ key);
+ void DeleteKey(PCSZ k);
+
protected:
PJPR First;
PJPR Last;
@@ -257,27 +166,30 @@ class JOBJECT : public JSON {
/* Class JARRAY. */
/***********************************************************************/
class JARRAY : public JSON {
- friend PJAR ParseArray(PGLOBAL, int&, STRG&, bool*);
+ friend class SWAP;
public:
- JARRAY(void) : JSON() {Alloc = 0; First = Last = NULL; Mvals = NULL;}
+ JARRAY(void);
+ JARRAY(int i) : JSON(i) {}
- using JSON::GetValue;
- using JSON::SetValue;
+ // Methods
virtual void Clear(void) {First = Last = NULL; Size = 0;}
- virtual JTYP GetType(void) {return TYPE_JAR;}
+ virtual int size(void) { return Size; }
virtual PJAR GetArray(void) {return this;}
virtual int GetSize(bool b);
- PJVAL AddValue(PGLOBAL g, PJVAL jvp = NULL, int *x = NULL);
- virtual void InitArray(PGLOBAL g);
- virtual PJVAL GetValue(int i);
- virtual PSZ GetText(PGLOBAL g, PSZ text);
+ virtual PJVAL GetArrayValue(int i);
+ virtual PSZ GetText(PGLOBAL g, PSTRG text);
virtual bool Merge(PGLOBAL g, PJSON jsp);
- virtual bool SetValue(PGLOBAL g, PJVAL jvp, int i);
virtual bool DeleteValue(int n);
virtual bool IsNull(void);
+ // Specific
+ PJVAL AddArrayValue(PGLOBAL g, PJVAL jvp = NULL, int* x = NULL);
+ bool SetArrayValue(PGLOBAL g, PJVAL jvp, int i);
+ void InitArray(PGLOBAL g);
+
protected:
// Members
+ int Size; // The number of items in the array
int Alloc; // The Mvals allocated size
PJVAL First; // Used when constructing
PJVAL Last; // Last constructed value
@@ -290,43 +202,161 @@ class JARRAY : public JSON {
class JVALUE : public JSON {
friend class JARRAY;
friend class JSNX;
+ friend class JSONDISC;
friend class JSONCOL;
friend class JSON;
- friend bool SerializeValue(JOUT*, PJVAL);
- public:
- JVALUE(void) : JSON() {Clear();}
+ friend class JDOC;
+ friend class SWAP;
+public:
+ JVALUE(void) : JSON() { Type = TYPE_JVAL; Clear(); }
JVALUE(PJSON jsp);
+//JVALUE(PGLOBAL g, PVL vlp);
JVALUE(PGLOBAL g, PVAL valp);
JVALUE(PGLOBAL g, PCSZ strp);
+ JVALUE(int i) : JSON(i) {}
+
+ //using JSON::GetVal;
+ //using JSON::SetVal;
- using JSON::GetValue;
- using JSON::SetValue;
- virtual void Clear(void)
- {Jsp = NULL; Value = NULL; Next = NULL; Del = false; Size = 1;}
- virtual JTYP GetType(void) {return TYPE_JVAL;}
+ // Methods
+ virtual void Clear(void);
+//virtual JTYP GetType(void) {return TYPE_JVAL;}
virtual JTYP GetValType(void);
virtual PJOB GetObject(void);
virtual PJAR GetArray(void);
- virtual PVAL GetValue(void) {return Value;}
- virtual PJSON GetJsp(void) {return Jsp;}
- virtual PJSON GetJson(void) { return (Jsp ? Jsp : this); }
- virtual int GetInteger(void);
- virtual long long GetBigint(void);
- virtual double GetFloat(void);
- virtual PSZ GetString(PGLOBAL g);
- virtual PSZ GetText(PGLOBAL g, PSZ text);
- virtual void SetValue(PJSON jsp);
- virtual void SetValue(PVAL valp) { Value = valp; Jsp = NULL; }
- virtual void SetString(PGLOBAL g, PSZ s, short c = 0);
- virtual void SetInteger(PGLOBAL g, int n);
- virtual void SetBigint(PGLOBAL g, longlong ll);
- virtual void SetFloat(PGLOBAL g, double f);
- virtual void SetTiny(PGLOBAL g, char f);
+ virtual PJSON GetJsp(void) {return (DataType == TYPE_JSON ? Jsp : NULL);}
+ virtual PSZ GetText(PGLOBAL g, PSTRG text);
virtual bool IsNull(void);
+ // Specific
+ //inline PVL GetVal(void) { return Val; }
+ //inline void SetVal(PVL vlp) { Val = vlp; }
+ inline PJSON GetJson(void) { return (DataType == TYPE_JSON ? Jsp : this); }
+ PSZ GetString(PGLOBAL g, char* buff = NULL);
+ int GetInteger(void);
+ long long GetBigint(void);
+ double GetFloat(void);
+ PVAL GetValue(PGLOBAL g);
+ void SetValue(PJSON jsp);
+ void SetValue(PGLOBAL g, PVAL valp);
+ void SetString(PGLOBAL g, PSZ s, int ci = 0);
+ void SetInteger(PGLOBAL g, int n);
+ void SetBigint(PGLOBAL g, longlong ll);
+ void SetFloat(PGLOBAL g, double f);
+ void SetBool(PGLOBAL g, bool b);
+
protected:
- PJSON Jsp; // To the json value
- PVAL Value; // The numeric value
- PJVAL Next; // Next value in array
- bool Del; // True when deleted
+ union {
+ PJSON Jsp; // To the json value
+ char *Strp; // Ptr to a string
+ int N; // An integer value
+ long long LLn; // A big integer value
+ double F; // A (double) float value
+ bool B; // True or false
+ };
+//PVL Val; // To the string or numeric value
+ PJVAL Next; // Next value in array
+ JTYP DataType; // The data value type
+ int Nd; // Decimal number
+ bool Del; // True when deleted
}; // end of class JVALUE
+
+
+/***********************************************************************/
+/* Class JOUT. Used by Serialize. */
+/***********************************************************************/
+class JOUT : public BLOCK {
+public:
+ JOUT(PGLOBAL gp) : BLOCK() { g = gp; Pretty = 3; }
+
+ virtual bool WriteStr(const char* s) = 0;
+ virtual bool WriteChr(const char c) = 0;
+ virtual bool Escape(const char* s) = 0;
+ int Prty(void) { return Pretty; }
+
+ // Member
+ PGLOBAL g;
+ int Pretty;
+}; // end of class JOUT
+
+/***********************************************************************/
+/* Class JOUTSTR. Used to Serialize to a string. */
+/***********************************************************************/
+class JOUTSTR : public JOUT {
+public:
+ JOUTSTR(PGLOBAL g);
+
+ virtual bool WriteStr(const char* s);
+ virtual bool WriteChr(const char c);
+ virtual bool Escape(const char* s);
+
+ // Member
+ char* Strp; // The serialized string
+ size_t N; // Position of next char
+ size_t Max; // String max size
+}; // end of class JOUTSTR
+
+/***********************************************************************/
+/* Class JOUTFILE. Used to Serialize to a file. */
+/***********************************************************************/
+class JOUTFILE : public JOUT {
+public:
+ JOUTFILE(PGLOBAL g, FILE* str, int pty) : JOUT(g) { Stream = str; Pretty = pty; }
+
+ virtual bool WriteStr(const char* s);
+ virtual bool WriteChr(const char c);
+ virtual bool Escape(const char* s);
+
+ // Member
+ FILE* Stream;
+}; // end of class JOUTFILE
+
+/***********************************************************************/
+/* Class JOUTPRT. Used to Serialize to a pretty file. */
+/***********************************************************************/
+class JOUTPRT : public JOUTFILE {
+public:
+ JOUTPRT(PGLOBAL g, FILE* str) : JOUTFILE(g, str, 2) { M = 0; B = false; }
+
+ virtual bool WriteStr(const char* s);
+ virtual bool WriteChr(const char c);
+
+ // Member
+ int M;
+ bool B;
+}; // end of class JOUTPRT
+
+
+/***********************************************************************/
+/* Class SWAP. Used to make or unmake a JSON tree movable. */
+/* This is done by making all pointers to offsets. */
+/***********************************************************************/
+class SWAP : public BLOCK {
+public:
+ // Constructor
+ SWAP(PGLOBAL g, PJSON jsp)
+ {
+ G = g, Base = (char*)jsp - 8;
+ }
+
+ // Methods
+ void SwapJson(PJSON jsp, bool move);
+
+protected:
+ size_t MoffJson(PJSON jnp);
+ size_t MoffArray(PJAR jarp);
+ size_t MoffObject(PJOB jobp);
+ size_t MoffJValue(PJVAL jvp);
+ size_t MoffPair(PJPR jpp);
+//size_t MoffVal(PVL vlp);
+ PJSON MptrJson(PJSON jnp);
+ PJAR MptrArray(PJAR jarp);
+ PJOB MptrObject(PJOB jobp);
+ PJVAL MptrJValue(PJVAL jvp);
+ PJPR MptrPair(PJPR jpp);
+//PVL MptrVal(PVL vlp);
+
+ // Member
+ PGLOBAL G;
+ void *Base;
+}; // end of class SWAP
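The JVALUE declaration above drops the separate PVAL member in favour of an anonymous union discriminated by DataType, so scalar values are stored inline in the node. A minimal sketch of the same tagged-union pattern, with purely hypothetical names rather than the CONNECT classes, might look like this:

#include <cstdio>

enum VType { V_NULL, V_INT, V_BIGINT, V_DOUBLE, V_BOOL, V_STRING };

struct TaggedVal {
  VType Type = V_NULL;        // discriminator, like JVALUE::DataType
  union {                     // overlapping storage, like JVALUE's union
    int         N;
    long long   LLn;
    double      F;
    bool        B;
    const char *Strp;
  };

  double AsDouble() const {   // typed access always goes through the tag
    switch (Type) {
      case V_INT:    return (double)N;
      case V_BIGINT: return (double)LLn;
      case V_DOUBLE: return F;
      case V_BOOL:   return B ? 1.0 : 0.0;
      default:       return 0.0;
    }
  }
};

int main() {
  TaggedVal v;
  v.Type = V_BIGINT;
  v.LLn = 1234567890123LL;
  printf("%.1f\n", v.AsDouble());   // prints: 1234567890123.0
  return 0;
}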
diff --git a/storage/connect/jsonudf.cpp b/storage/connect/jsonudf.cpp
index 44028a32564..044ed0772ea 100644
--- a/storage/connect/jsonudf.cpp
+++ b/storage/connect/jsonudf.cpp
@@ -27,12 +27,6 @@
#endif
#define M 9
-bool IsNum(PSZ s);
-char *NextChr(PSZ s, char sep);
-char *GetJsonNull(void);
-uint GetJsonGrpSize(void);
-static int IsJson(UDF_ARGS *args, uint i, bool b = false);
-static PSZ MakePSZ(PGLOBAL g, UDF_ARGS *args, int i);
static char *handle_item(UDF_INIT *initid, UDF_ARGS *args, char *result,
unsigned long *res_length, char *is_null, char *error);
static char *bin_handle_item(UDF_INIT *initid, UDF_ARGS *args, char *result,
@@ -40,8 +34,10 @@ static char *bin_handle_item(UDF_INIT *initid, UDF_ARGS *args, char *result,
static PJSON JsonNew(PGLOBAL g, JTYP type);
static PJVAL JvalNew(PGLOBAL g, JTYP type, void *vp = NULL);
static PJSNX JsnxNew(PGLOBAL g, PJSON jsp, int type, int len = 64);
+uint GetJsonGroupSize(void);
+static void SetChanged(PBSON bsp);
-static uint JsonGrpSize = 10;
+uint JsonGrpSize = 10;
/*********************************************************************************/
/* SubAlloc a new JSNX class with protection against memory exhaustion. */
@@ -63,7 +59,7 @@ static PJSNX JsnxNew(PGLOBAL g, PJSON jsp, int type, int len)
return jsx;
} /* end of JsnxNew */
- /* ----------------------------------- JSNX ------------------------------------ */
+/* ----------------------------------- JSNX ------------------------------------ */
/*********************************************************************************/
/* JSNX public constructor. */
@@ -347,7 +343,7 @@ PVAL JSNX::MakeJson(PGLOBAL g, PJSON jsp)
/*********************************************************************************/
/* SetValue: Set a value from a JVALUE contains. */
/*********************************************************************************/
-void JSNX::SetJsonValue(PGLOBAL g, PVAL vp, PJVAL val, int n)
+void JSNX::SetJsonValue(PGLOBAL g, PVAL vp, PJVAL val)
{
if (val) {
vp->SetNull(false);
@@ -355,11 +351,22 @@ void JSNX::SetJsonValue(PGLOBAL g, PVAL vp, PJVAL val, int n)
if (Jb) {
vp->SetValue_psz(Serialize(g, val->GetJsp(), NULL, 0));
} else switch (val->GetValType()) {
+ case TYPE_DTM:
case TYPE_STRG:
+ vp->SetValue_psz(val->GetString(g));
+ break;
case TYPE_INTG:
+ vp->SetValue(val->GetInteger());
+ break;
case TYPE_BINT:
+ vp->SetValue(val->GetBigint());
+ break;
case TYPE_DBL:
- vp->SetValue_pval(val->GetValue());
+ if (vp->IsTypeNum())
+ vp->SetValue(val->GetFloat());
+ else // Get the proper number of decimals
+ vp->SetValue_psz(val->GetString(g));
+
break;
case TYPE_BOOL:
if (vp->IsTypeNum())
@@ -369,14 +376,11 @@ void JSNX::SetJsonValue(PGLOBAL g, PVAL vp, PJVAL val, int n)
break;
case TYPE_JAR:
- SetJsonValue(g, vp, val->GetArray()->GetValue(0), n);
+ vp->SetValue_psz(val->GetArray()->GetText(g, NULL));
break;
case TYPE_JOB:
-// if (!vp->IsTypeNum() || !Strict) {
vp->SetValue_psz(val->GetObject()->GetText(g, NULL));
break;
-// } // endif Type
-
case TYPE_NULL:
vp->SetNull(true);
/* falls through */
@@ -412,11 +416,10 @@ void JSNX::ReadValue(PGLOBAL g)
/*********************************************************************************/
PVAL JSNX::GetColumnValue(PGLOBAL g, PJSON row, int i)
{
- int n = Nod - 1;
PJVAL val = NULL;
val = GetRowValue(g, row, i);
- SetJsonValue(g, Value, val, n);
+ SetJsonValue(g, Value, val);
return Value;
} // end of GetColumnValue
@@ -430,7 +433,7 @@ PJVAL JSNX::GetRowValue(PGLOBAL g, PJSON row, int i, my_bool b)
for (; i < Nod && row; i++) {
if (Nodes[i].Op == OP_NUM) {
- Value->SetValue(row->GetType() == TYPE_JAR ? row->size() : 1);
+ Value->SetValue(row->GetType() == TYPE_JAR ? ((PJAR)row)->size() : 1);
val = new(g) JVALUE(g, Value);
return val;
} else if (Nodes[i].Op == OP_XX) {
@@ -452,7 +455,7 @@ PJVAL JSNX::GetRowValue(PGLOBAL g, PJSON row, int i, my_bool b)
} //endif Op
} else
- val = ((PJOB)row)->GetValue(Nodes[i].Key);
+ val = ((PJOB)row)->GetKeyValue(Nodes[i].Key);
break;
case TYPE_JAR:
@@ -460,7 +463,7 @@ PJVAL JSNX::GetRowValue(PGLOBAL g, PJSON row, int i, my_bool b)
if (!Nodes[i].Key) {
if (Nodes[i].Op == OP_EQ || Nodes[i].Op == OP_LE)
- val = arp->GetValue(Nodes[i].Rank);
+ val = arp->GetArrayValue(Nodes[i].Rank);
else if (Nodes[i].Op == OP_EXP)
return (PJVAL)ExpandArray(g, arp, i);
else
@@ -468,7 +471,7 @@ PJVAL JSNX::GetRowValue(PGLOBAL g, PJSON row, int i, my_bool b)
} else {
// Unexpected array, unwrap it as [0]
- val = arp->GetValue(0);
+ val = arp->GetArrayValue(0);
i--;
} // endif's
@@ -488,7 +491,7 @@ PJVAL JSNX::GetRowValue(PGLOBAL g, PJSON row, int i, my_bool b)
} // endfor i
- // SetJsonValue(g, Value, val, n);
+ // SetJsonValue(g, Value, val);
return val;
} // end of GetRowValue
@@ -519,17 +522,17 @@ PVAL JSNX::CalculateArray(PGLOBAL g, PJAR arp, int n)
htrc("CalculateArray size=%d op=%d\n", ars, op);
for (i = 0; i < ars; i++) {
- jvrp = arp->GetValue(i);
+ jvrp = arp->GetArrayValue(i);
if (trace(1))
htrc("i=%d nv=%d\n", i, nv);
if (!jvrp->IsNull() || (op == OP_CNC && GetJsonNull())) {
if (jvrp->IsNull()) {
- jvrp->Value = AllocateValue(g, GetJsonNull(), TYPE_STRING);
+ jvrp->SetString(g, GetJsonNull(), 0);
jvp = jvrp;
} else if (n < Nod - 1 && jvrp->GetJson()) {
- jval.SetValue(GetColumnValue(g, jvrp->GetJson(), n + 1));
+ jval.SetValue(g, GetColumnValue(g, jvrp->GetJson(), n + 1));
jvp = &jval;
} else
jvp = jvrp;
@@ -539,10 +542,10 @@ PVAL JSNX::CalculateArray(PGLOBAL g, PJAR arp, int n)
jvp->GetString(g), jvp->IsNull() ? 1 : 0);
if (!nv++) {
- SetJsonValue(g, vp, jvp, n);
+ SetJsonValue(g, vp, jvp);
continue;
} else
- SetJsonValue(g, MulVal, jvp, n);
+ SetJsonValue(g, MulVal, jvp);
if (!MulVal->IsNull()) {
switch (op) {
@@ -612,13 +615,13 @@ my_bool JSNX::CheckPath(PGLOBAL g)
} else switch (row->GetType()) {
case TYPE_JOB:
if (Nodes[i].Key)
- val = ((PJOB)row)->GetValue(Nodes[i].Key);
+ val = ((PJOB)row)->GetKeyValue(Nodes[i].Key);
break;
case TYPE_JAR:
if (!Nodes[i].Key)
if (Nodes[i].Op == OP_EQ || Nodes[i].Op == OP_LE)
- val = ((PJAR)row)->GetValue(Nodes[i].Rank);
+ val = ((PJAR)row)->GetArrayValue(Nodes[i].Rank);
break;
case TYPE_JVAL:
@@ -655,20 +658,20 @@ PJSON JSNX::GetRow(PGLOBAL g)
// Expected Array was not there, wrap the value
continue;
- val = ((PJOB)row)->GetValue(Nodes[i].Key);
+ val = ((PJOB)row)->GetKeyValue(Nodes[i].Key);
break;
case TYPE_JAR:
arp = (PJAR)row;
if (!Nodes[i].Key) {
if (Nodes[i].Op == OP_EQ)
- val = arp->GetValue(Nodes[i].Rank);
+ val = arp->GetArrayValue(Nodes[i].Rank);
else
- val = arp->GetValue(Nodes[i].Rx);
+ val = arp->GetArrayValue(Nodes[i].Rx);
} else {
// Unexpected array, unwrap it as [0]
- val = arp->GetValue(0);
+ val = arp->GetArrayValue(0);
i--;
} // endif Nodes
@@ -695,9 +698,9 @@ PJSON JSNX::GetRow(PGLOBAL g)
nwr = new(g)JOBJECT;
if (row->GetType() == TYPE_JOB) {
- ((PJOB)row)->SetValue(g, new(g)JVALUE(nwr), Nodes[i-1].Key);
+ ((PJOB)row)->SetKeyValue(g, new(g)JVALUE(nwr), Nodes[i-1].Key);
} else if (row->GetType() == TYPE_JAR) {
- ((PJAR)row)->AddValue(g, new(g)JVALUE(nwr));
+ ((PJAR)row)->AddArrayValue(g, new(g)JVALUE(nwr));
((PJAR)row)->InitArray(g);
} else {
strcpy(g->Message, "Wrong type when writing new row");
@@ -740,16 +743,16 @@ my_bool JSNX::WriteValue(PGLOBAL g, PJVAL jvalp)
if (arp) {
if (!Nodes[Nod-1].Key) {
if (Nodes[Nod-1].Op == OP_EQ)
- arp->SetValue(g, jvalp, Nodes[Nod-1].Rank);
+ arp->SetArrayValue(g, jvalp, Nodes[Nod-1].Rank);
else
- arp->AddValue(g, jvalp);
+ arp->AddArrayValue(g, jvalp);
arp->InitArray(g);
} // endif Key
} else if (objp) {
if (Nodes[Nod-1].Key)
- objp->SetValue(g, jvalp, Nodes[Nod-1].Key);
+ objp->SetKeyValue(g, jvalp, Nodes[Nod-1].Key);
} else if (jvp)
jvp->SetValue(jvalp);
@@ -781,13 +784,13 @@ PSZ JSNX::Locate(PGLOBAL g, PJSON jsp, PJVAL jvp, int k)
switch (jsp->GetType()) {
case TYPE_JAR:
- err = LocateArray((PJAR)jsp);
+ err = LocateArray(g, (PJAR)jsp);
break;
case TYPE_JOB:
- err = LocateObject((PJOB)jsp);
+ err = LocateObject(g, (PJOB)jsp);
break;
case TYPE_JVAL:
- err = LocateValue((PJVAL)jsp);
+ err = LocateValue(g, (PJVAL)jsp);
break;
default:
err = true;
@@ -818,7 +821,7 @@ PSZ JSNX::Locate(PGLOBAL g, PJSON jsp, PJVAL jvp, int k)
/*********************************************************************************/
/* Locate in a JSON Array. */
/*********************************************************************************/
-my_bool JSNX::LocateArray(PJAR jarp)
+my_bool JSNX::LocateArray(PGLOBAL g, PJAR jarp)
{
char s[16];
size_t m = Jp->N;
@@ -830,7 +833,7 @@ my_bool JSNX::LocateArray(PJAR jarp)
if (Jp->WriteStr(s))
return true;
- if (LocateValue(jarp->GetValue(i)))
+ if (LocateValue(g, jarp->GetArrayValue(i)))
return true;
} // endfor i
@@ -841,7 +844,7 @@ my_bool JSNX::LocateArray(PJAR jarp)
/*********************************************************************************/
/* Locate in a JSON Object. */
/*********************************************************************************/
-my_bool JSNX::LocateObject(PJOB jobp)
+my_bool JSNX::LocateObject(PGLOBAL g, PJOB jobp)
{
size_t m;
@@ -856,7 +859,7 @@ my_bool JSNX::LocateObject(PJOB jobp)
if (Jp->WriteStr(pair->Key))
return true;
- if (LocateValue(pair->Val))
+ if (LocateValue(g, pair->Val))
return true;
} // endfor i
@@ -867,14 +870,14 @@ my_bool JSNX::LocateObject(PJOB jobp)
/*********************************************************************************/
/* Locate a JSON Value. */
/*********************************************************************************/
-my_bool JSNX::LocateValue(PJVAL jvp)
+my_bool JSNX::LocateValue(PGLOBAL g, PJVAL jvp)
{
- if (CompareTree(Jvalp, jvp))
+ if (CompareTree(g, Jvalp, jvp))
Found = (--K == 0);
else if (jvp->GetArray())
- return LocateArray(jvp->GetArray());
+ return LocateArray(g, jvp->GetArray());
else if (jvp->GetObject())
- return LocateObject(jvp->GetObject());
+ return LocateObject(g, jvp->GetObject());
return false;
} // end of LocateValue
@@ -907,13 +910,13 @@ PSZ JSNX::LocateAll(PGLOBAL g, PJSON jsp, PJVAL jvp, int mx)
switch (jsp->GetType()) {
case TYPE_JAR:
- err = LocateArrayAll((PJAR)jsp);
+ err = LocateArrayAll(g, (PJAR)jsp);
break;
case TYPE_JOB:
- err = LocateObjectAll((PJOB)jsp);
+ err = LocateObjectAll(g, (PJOB)jsp);
break;
case TYPE_JVAL:
- err = LocateValueAll((PJVAL)jsp);
+ err = LocateValueAll(g, (PJVAL)jsp);
break;
default:
err = true;
@@ -945,7 +948,7 @@ PSZ JSNX::LocateAll(PGLOBAL g, PJSON jsp, PJVAL jvp, int mx)
/*********************************************************************************/
/* Locate in a JSON Array. */
/*********************************************************************************/
-my_bool JSNX::LocateArrayAll(PJAR jarp)
+my_bool JSNX::LocateArrayAll(PGLOBAL g, PJAR jarp)
{
if (I < Imax) {
Jpnp[++I].Type = TYPE_JAR;
@@ -953,7 +956,7 @@ my_bool JSNX::LocateArrayAll(PJAR jarp)
for (int i = 0; i < jarp->size(); i++) {
Jpnp[I].N = i;
- if (LocateValueAll(jarp->GetValue(i)))
+ if (LocateValueAll(g, jarp->GetArrayValue(i)))
return true;
} // endfor i
@@ -967,7 +970,7 @@ my_bool JSNX::LocateArrayAll(PJAR jarp)
/*********************************************************************************/
/* Locate in a JSON Object. */
/*********************************************************************************/
-my_bool JSNX::LocateObjectAll(PJOB jobp)
+my_bool JSNX::LocateObjectAll(PGLOBAL g, PJOB jobp)
{
if (I < Imax) {
Jpnp[++I].Type = TYPE_JOB;
@@ -975,7 +978,7 @@ my_bool JSNX::LocateObjectAll(PJOB jobp)
for (PJPR pair = jobp->First; pair; pair = pair->Next) {
Jpnp[I].Key = pair->Key;
- if (LocateValueAll(pair->Val))
+ if (LocateValueAll(g, pair->Val))
return true;
} // endfor i
@@ -989,14 +992,14 @@ my_bool JSNX::LocateObjectAll(PJOB jobp)
/*********************************************************************************/
/* Locate a JSON Value. */
/*********************************************************************************/
-my_bool JSNX::LocateValueAll(PJVAL jvp)
+my_bool JSNX::LocateValueAll(PGLOBAL g, PJVAL jvp)
{
- if (CompareTree(Jvalp, jvp))
+ if (CompareTree(g, Jvalp, jvp))
return AddPath();
else if (jvp->GetArray())
- return LocateArrayAll(jvp->GetArray());
+ return LocateArrayAll(g, jvp->GetArray());
else if (jvp->GetObject())
- return LocateObjectAll(jvp->GetObject());
+ return LocateObjectAll(g, jvp->GetObject());
return false;
} // end of LocateValueAll
@@ -1004,7 +1007,7 @@ my_bool JSNX::LocateValueAll(PJVAL jvp)
/*********************************************************************************/
/* Compare two JSON trees. */
/*********************************************************************************/
-my_bool JSNX::CompareTree(PJSON jp1, PJSON jp2)
+my_bool JSNX::CompareTree(PGLOBAL g, PJSON jp1, PJSON jp2)
{
if (!jp1 || !jp2 || jp1->GetType() != jp2->GetType()
|| jp1->size() != jp2->size())
@@ -1013,26 +1016,22 @@ my_bool JSNX::CompareTree(PJSON jp1, PJSON jp2)
my_bool found = true;
if (jp1->GetType() == TYPE_JVAL) {
- PVAL v1 = jp1->GetValue(), v2 = jp2->GetValue();
-
- if (v1 && v2) {
- if (v1->GetType() == v2->GetType())
- found = !v1->CompareValue(v2);
- else
- found = false;
+// PVL v1 = ((PJVAL)jp1)->GetVal(), v2 = ((PJVAL)jp2)->GetVal();
- } else
- found = CompareTree(jp1->GetJsp(), jp2->GetJsp());
+ if (((PJVAL)jp1)->DataType == TYPE_JSON && ((PJVAL)jp2)->DataType == TYPE_JSON)
+ found = CompareTree(g, jp1->GetJsp(), jp2->GetJsp());
+ else
+ found = CompareValues(((PJVAL)jp1), ((PJVAL)jp2));
} else if (jp1->GetType() == TYPE_JAR) {
for (int i = 0; found && i < jp1->size(); i++)
- found = (CompareTree(jp1->GetValue(i), jp2->GetValue(i)));
+ found = (CompareTree(g, jp1->GetArrayValue(i), jp2->GetArrayValue(i)));
} else if (jp1->GetType() == TYPE_JOB) {
PJPR p1 = jp1->GetFirst(), p2 = jp2->GetFirst();
for (; found && p1 && p2; p1 = p1->Next, p2 = p2->Next)
- found = CompareTree(p1->Val, p2->Val);
+ found = CompareTree(g, p1->Val, p2->Val);
} else
found = false;
@@ -1041,10 +1040,68 @@ my_bool JSNX::CompareTree(PJSON jp1, PJSON jp2)
} // end of CompareTree
/*********************************************************************************/
-/* Add the found path to the list. */
+/* Compare two VAL values and return true if they are equal. */
/*********************************************************************************/
-my_bool JSNX::AddPath(void)
+my_bool JSNX::CompareValues(PJVAL v1, PJVAL v2)
{
+ my_bool b = false;
+
+ switch (v1->DataType) {
+ case TYPE_STRG:
+ if (v2->DataType == TYPE_STRG) {
+ if (v1->Nd || v2->Nd) // Case insensitive
+ b = (!stricmp(v1->Strp, v2->Strp));
+ else
+ b = (!strcmp(v1->Strp, v2->Strp));
+
+ } // endif Type
+
+ break;
+ case TYPE_DTM:
+ if (v2->DataType == TYPE_DTM)
+ b = (!strcmp(v1->Strp, v2->Strp));
+
+ break;
+ case TYPE_INTG:
+ if (v2->DataType == TYPE_INTG)
+ b = (v1->N == v2->N);
+ else if (v2->DataType == TYPE_BINT)
+ b = (v1->N == v2->LLn);
+
+ break;
+ case TYPE_BINT:
+ if (v2->DataType == TYPE_INTG)
+ b = (v1->LLn == v2->N);
+ else if (v2->DataType == TYPE_BINT)
+ b = (v1->LLn == v2->LLn);
+
+ break;
+ case TYPE_DBL:
+ if (v2->DataType == TYPE_DBL)
+ b = (v1->F == v2->F);
+
+ break;
+ case TYPE_BOOL:
+ if (v2->DataType == TYPE_BOOL)
+ b = (v1->B == v2->B);
+
+ break;
+ case TYPE_NULL:
+ if (v2->DataType == TYPE_NULL)
+ b = true;
+
+ break;
+ default:
+ break;
+ } // endswitch Type
+
+ return b;
+} // end of CompareValues
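CompareValues above replaces the former PVAL-based comparison with a switch over the two DataType tags, including the mixed TYPE_INTG/TYPE_BINT cases. A compact standalone illustration of that cross-width integer rule follows; the names are hypothetical and only sketch the integer branches, not the UDF code itself.

#include <cstdio>

enum Tag { T_INT, T_BINT };

struct Val {
  Tag t;
  union { int n; long long lln; };
};

// Equal if both hold an integer of either width and the numbers match,
// mirroring the TYPE_INTG / TYPE_BINT branches above.
static bool EqualInts(const Val &a, const Val &b) {
  long long x = (a.t == T_INT) ? a.n : a.lln;
  long long y = (b.t == T_INT) ? b.n : b.lln;
  return x == y;
}

int main() {
  Val a; a.t = T_INT;  a.n = 42;
  Val b; b.t = T_BINT; b.lln = 42;
  printf("%d\n", EqualInts(a, b) ? 1 : 0);    // prints: 1
  return 0;
}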
+
+/*********************************************************************************/
+/* Add the found path to the list. */
+/*********************************************************************************/
+my_bool JSNX::AddPath(void) {
char s[16];
if (Jp->WriteStr("\"$"))
@@ -1113,7 +1170,7 @@ static void SetChanged(PBSON bsp)
/*********************************************************************************/
/* Replaces GetJsonGrpSize not usable when CONNECT is not installed. */
/*********************************************************************************/
-static uint GetJsonGroupSize(void)
+uint GetJsonGroupSize(void)
{
return (JsonGrpSize) ? JsonGrpSize : GetJsonGrpSize();
} // end of GetJsonGroupSize
@@ -1121,12 +1178,16 @@ static uint GetJsonGroupSize(void)
/*********************************************************************************/
/* Program for SubSet re-initialization of the memory pool. */
/*********************************************************************************/
-static my_bool JsonSubSet(PGLOBAL g)
+my_bool JsonSubSet(PGLOBAL g, my_bool b)
{
PPOOLHEADER pph = (PPOOLHEADER)g->Sarea;
- pph->To_Free = (g->Saved_Size) ? g->Saved_Size : (size_t)sizeof(POOLHEADER);
+ pph->To_Free = (g->Saved_Size) ? g->Saved_Size : sizeof(POOLHEADER);
pph->FreeBlk = g->Sarea_Size - pph->To_Free;
+
+ if (b)
+ g->Saved_Size = 0;
+
return FALSE;
} /* end of JsonSubSet */
@@ -1144,7 +1205,7 @@ inline void JsonMemSave(PGLOBAL g)
inline void JsonFreeMem(PGLOBAL g)
{
g->Activityp = NULL;
- PlugExit(g);
+ g = PlugExit(g);
} /* end of JsonFreeMem */
/*********************************************************************************/
@@ -1193,9 +1254,10 @@ static PJVAL JvalNew(PGLOBAL g, JTYP type, void *vp)
case TYPE_JOB:
jvp = new(g) JVALUE((PJSON)vp);
break;
- case TYPE_VAL:
- jvp = new(g) JVALUE(g, (PVAL)vp);
- break;
+// case TYPE_VAL:
+// jvp = new(g) JVALUE(g, (PVAL)vp);
+// break;
+ case TYPE_DTM:
case TYPE_STRG:
jvp = new(g) JVALUE(g, (PCSZ)vp);
break;
@@ -1211,24 +1273,22 @@ static PJVAL JvalNew(PGLOBAL g, JTYP type, void *vp)
} // end try/catch
return jvp;
-} /* end of JsonNew */
+} /* end of JvalNew */
/*********************************************************************************/
/* Allocate and initialise the memory area. */
/*********************************************************************************/
-static my_bool JsonInit(UDF_INIT *initid, UDF_ARGS *args,
- char *message, my_bool mbn,
- unsigned long reslen, unsigned long memlen,
- unsigned long more = 0)
+my_bool JsonInit(UDF_INIT *initid, UDF_ARGS *args, char *message, my_bool mbn,
+ unsigned long reslen, unsigned long memlen, unsigned long more)
{
- PGLOBAL g = PlugInit(NULL, memlen + more + 500); // +500 to avoid CheckMem
+ PGLOBAL g = PlugInit(NULL, (size_t)memlen + more + 500); // +500 to avoid CheckMem
if (!g) {
strcpy(message, "Allocation error");
return true;
} else if (g->Sarea_Size == 0) {
strcpy(message, g->Message);
- PlugExit(g);
+ g = PlugExit(g);
return true;
} // endif g
@@ -1382,7 +1442,7 @@ static int *GetIntArgPtr(PGLOBAL g, UDF_ARGS *args, uint& n)
/*********************************************************************************/
/* Returns not 0 if the argument is a JSON item or file name. */
/*********************************************************************************/
-static int IsJson(UDF_ARGS *args, uint i, bool b)
+int IsJson(UDF_ARGS *args, uint i, bool b)
{
int n = 0;
@@ -1405,7 +1465,7 @@ static int IsJson(UDF_ARGS *args, uint i, bool b)
char *sap;
PGLOBAL g = PlugInit(NULL, (size_t)args->lengths[i] * M + 1024);
- JsonSubSet(g);
+// JsonSubSet(g);
sap = MakePSZ(g, args, i);
if (ParseJson(g, sap, strlen(sap)))
@@ -1449,9 +1509,8 @@ static long GetFileLength(char *fn)
/*********************************************************************************/
/* Calculate the reslen and memlen needed by a function. */
/*********************************************************************************/
-static my_bool CalcLen(UDF_ARGS *args, my_bool obj,
- unsigned long& reslen, unsigned long& memlen,
- my_bool mod = false)
+my_bool CalcLen(UDF_ARGS *args, my_bool obj, unsigned long& reslen,
+ unsigned long& memlen, my_bool mod)
{
char fn[_MAX_PATH];
unsigned long i, k, m, n;
@@ -1568,8 +1627,8 @@ static my_bool CalcLen(UDF_ARGS *args, my_bool obj,
/*********************************************************************************/
/* Check if the calculated memory is enough. */
/*********************************************************************************/
-static my_bool CheckMemory(PGLOBAL g, UDF_INIT *initid, UDF_ARGS *args, uint n,
- my_bool m, my_bool obj = false, my_bool mod = false)
+my_bool CheckMemory(PGLOBAL g, UDF_INIT *initid, UDF_ARGS *args, uint n,
+ my_bool m, my_bool obj, my_bool mod)
{
unsigned long rl, ml;
my_bool b = false;
@@ -1621,7 +1680,7 @@ static my_bool CheckMemory(PGLOBAL g, UDF_INIT *initid, UDF_ARGS *args, uint n,
/*********************************************************************************/
/* Make a zero terminated string from the passed argument. */
/*********************************************************************************/
-static PSZ MakePSZ(PGLOBAL g, UDF_ARGS *args, int i)
+PSZ MakePSZ(PGLOBAL g, UDF_ARGS *args, int i)
{
if (args->arg_count > (unsigned)i && args->args[i]) {
int n = args->lengths[i];
@@ -1690,7 +1749,7 @@ static PCSZ MakeKey(PGLOBAL g, UDF_ARGS *args, int i)
/*********************************************************************************/
/* Parse a json file. */
/*********************************************************************************/
-static PJSON ParseJsonFile(PGLOBAL g, char *fn, int *pretty, int& len)
+static PJSON ParseJsonFile(PGLOBAL g, char *fn, int *pretty, size_t& len)
{
char *memory;
HANDLE hFile;
@@ -1712,9 +1771,13 @@ static PJSON ParseJsonFile(PGLOBAL g, char *fn, int *pretty, int& len)
} // endif hFile
/*******************************************************************************/
- /* Get the file size (assuming file is smaller than 4 GB) */
+ /* Get the file size. */
/*******************************************************************************/
- len = mm.lenL;
+ len = (size_t)mm.lenL;
+
+ if (mm.lenH)
+    len += ((size_t)mm.lenH * 0x100000000LL);
+
memory = (char *)mm.memory;
if (!len) { // Empty or deleted file
@@ -1742,7 +1805,7 @@ static PJSON ParseJsonFile(PGLOBAL g, char *fn, int *pretty, int& len)
/*********************************************************************************/
/* Return a json file contains. */
/*********************************************************************************/
-static char *GetJsonFile(PGLOBAL g, char *fn)
+char *GetJsonFile(PGLOBAL g, char *fn)
{
char *str;
int h, n, len;
@@ -1784,7 +1847,7 @@ static PJVAL MakeValue(PGLOBAL g, UDF_ARGS *args, uint i, PJSON *top = NULL)
{
char *sap = (args->arg_count > i) ? args->args[i] : NULL;
int n, len;
- short c;
+ int ci;
long long bigint;
PJSON jsp;
PJVAL jvp = new(g) JVALUE;
@@ -1827,8 +1890,8 @@ static PJVAL MakeValue(PGLOBAL g, UDF_ARGS *args, uint i, PJSON *top = NULL)
jvp->SetValue(jsp);
} else {
- c = (strnicmp(args->attributes[i], "ci", 2)) ? 0 : 1;
- jvp->SetString(g, sap, c);
+ ci = (strnicmp(args->attributes[i], "ci", 2)) ? 0 : 1;
+ jvp->SetString(g, sap, ci);
} // endif n
} // endif len
@@ -1839,7 +1902,7 @@ static PJVAL MakeValue(PGLOBAL g, UDF_ARGS *args, uint i, PJSON *top = NULL)
if ((bigint == 0LL && !strcmp(args->attributes[i], "FALSE")) ||
(bigint == 1LL && !strcmp(args->attributes[i], "TRUE")))
- jvp->SetTiny(g, (char)bigint);
+ jvp->SetBool(g, (char)bigint);
else
jvp->SetBigint(g, bigint);
@@ -1894,6 +1957,8 @@ static PJVAL MakeTypedValue(PGLOBAL g, UDF_ARGS *args, uint i,
return jvp;
} // end of MakeTypedValue
+/* ------------------------------ The JSON UDF's ------------------------------- */
+
/*********************************************************************************/
/* Make a Json value containing the parameter. */
/*********************************************************************************/
@@ -1962,7 +2027,7 @@ char *json_make_array(UDF_INIT *initid, UDF_ARGS *args, char *result,
PJAR arp = new(g)JARRAY;
for (uint i = 0; i < args->arg_count; i++)
- arp->AddValue(g, MakeValue(g, args, i));
+ arp->AddArrayValue(g, MakeValue(g, args, i));
arp->InitArray(g);
@@ -2032,13 +2097,13 @@ char *json_array_add_values(UDF_INIT *initid, UDF_ARGS *args, char *result,
if (jvp->GetValType() != TYPE_JAR) {
arp = new(g)JARRAY;
- arp->AddValue(g, jvp);
+ arp->AddArrayValue(g, jvp);
top = arp;
} else
arp = jvp->GetArray();
for (uint i = 1; i < args->arg_count; i++)
- arp->AddValue(g, MakeValue(g, args, i));
+ arp->AddArrayValue(g, MakeValue(g, args, i));
arp->InitArray(g);
str = MakeResult(g, args, top, args->arg_count);
@@ -2130,7 +2195,7 @@ char *json_array_add(UDF_INIT *initid, UDF_ARGS *args, char *result,
if (jvp->GetValType() != TYPE_JAR) {
if ((arp = (PJAR)JsonNew(gb, TYPE_JAR))) {
- arp->AddValue(gb, JvalNew(gb, TYPE_JVAL, jvp));
+ arp->AddArrayValue(gb, JvalNew(gb, TYPE_JVAL, jvp));
jvp->SetValue(arp);
if (!top)
@@ -2142,7 +2207,7 @@ char *json_array_add(UDF_INIT *initid, UDF_ARGS *args, char *result,
arp = jvp->GetArray();
if (arp) {
- arp->AddValue(gb, MakeValue(gb, args, 1), x);
+ arp->AddArrayValue(gb, MakeValue(gb, args, 1), x);
arp->InitArray(gb);
str = MakeResult(g, args, top, n);
} else
@@ -2311,7 +2376,7 @@ long long jsonsum_int(UDF_INIT *initid, UDF_ARGS *args, char *is_null, char *err
PJAR arp = jvp->GetArray();
for (int i = 0; i < arp->size(); i++)
- n += arp->GetValue(i)->GetBigint();
+ n += arp->GetArrayValue(i)->GetBigint();
} else {
PUSH_WARNING("First argument target is not an array");
@@ -2386,7 +2451,7 @@ double jsonsum_real(UDF_INIT *initid, UDF_ARGS *args, char *is_null, char *error
PJAR arp = jvp->GetArray();
for (int i = 0; i < arp->size(); i++)
- n += arp->GetValue(i)->GetFloat();
+ n += arp->GetArrayValue(i)->GetFloat();
} else {
PUSH_WARNING("First argument target is not an array");
@@ -2451,7 +2516,7 @@ double jsonavg_real(UDF_INIT *initid, UDF_ARGS *args, char *is_null, char *error
if (arp->size()) {
for (int i = 0; i < arp->size(); i++)
- n += arp->GetValue(i)->GetFloat();
+ n += arp->GetArrayValue(i)->GetFloat();
n /= arp->size();
} // endif size
@@ -2510,7 +2575,7 @@ char *json_make_object(UDF_INIT *initid, UDF_ARGS *args, char *result,
if ((objp = (PJOB)JsonNew(g, TYPE_JOB))) {
for (uint i = 0; i < args->arg_count; i++)
- objp->SetValue(g, MakeValue(g, args, i), MakeKey(g, args, i));
+ objp->SetKeyValue(g, MakeValue(g, args, i), MakeKey(g, args, i));
str = Serialize(g, objp, NULL, 0);
} // endif objp
@@ -2560,7 +2625,7 @@ char *json_object_nonull(UDF_INIT *initid, UDF_ARGS *args, char *result,
if ((objp = (PJOB)JsonNew(g, TYPE_JOB))) {
for (uint i = 0; i < args->arg_count; i++)
if (!(jvp = MakeValue(g, args, i))->IsNull())
- objp->SetValue(g, jvp, MakeKey(g, args, i));
+ objp->SetKeyValue(g, jvp, MakeKey(g, args, i));
str = Serialize(g, objp, NULL, 0);
} // endif objp
@@ -2612,7 +2677,7 @@ char *json_object_key(UDF_INIT *initid, UDF_ARGS *args, char *result,
if ((objp = (PJOB)JsonNew(g, TYPE_JOB))) {
for (uint i = 0; i < args->arg_count; i += 2)
- objp->SetValue(g, MakeValue(g, args, i + 1), MakePSZ(g, args, i));
+ objp->SetKeyValue(g, MakeValue(g, args, i + 1), MakePSZ(g, args, i));
str = Serialize(g, objp, NULL, 0);
} // endif objp
@@ -2696,7 +2761,7 @@ char *json_object_add(UDF_INIT *initid, UDF_ARGS *args, char *result,
jobp = jvp->GetObject();
jvp = MakeValue(gb, args, 1);
key = MakeKey(gb, args, 1);
- jobp->SetValue(gb, jvp, key);
+ jobp->SetKeyValue(gb, jvp, key);
str = MakeResult(g, args, top);
} else {
PUSH_WARNING("First argument target is not an object");
@@ -3049,7 +3114,7 @@ void json_array_grp_add(UDF_INIT *initid, UDF_ARGS *args, char*, char*)
PJAR arp = (PJAR)g->Activityp;
if (arp && g->N-- > 0)
- arp->AddValue(g, MakeValue(g, args, 0));
+ arp->AddArrayValue(g, MakeValue(g, args, 0));
} // end of json_array_grp_add
@@ -3126,7 +3191,7 @@ void json_object_grp_add(UDF_INIT *initid, UDF_ARGS *args, char*, char*)
PJOB objp = (PJOB)g->Activityp;
if (g->N-- > 0)
- objp->SetValue(g, MakeValue(g, args, 1), MakePSZ(g, args, 0));
+ objp->SetKeyValue(g, MakeValue(g, args, 1), MakePSZ(g, args, 0));
} // end of json_object_grp_add
@@ -4007,17 +4072,14 @@ my_bool jsoncontains_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
return JsonInit(initid, args, message, false, reslen, memlen, more);
} // end of jsoncontains_init
-long long jsoncontains(UDF_INIT *initid, UDF_ARGS *args, char *result,
- unsigned long *res_length, char *is_null, char *error)
+long long jsoncontains(UDF_INIT *initid, UDF_ARGS *args, char *, char *error)
{
- char *p __attribute__((unused)), res[256];
- long long n;
+ char isn, res[256];
unsigned long reslen;
- *is_null = 0;
- p = jsonlocate(initid, args, res, &reslen, is_null, error);
- n = (*is_null) ? 0LL : 1LL;
- return n;
+ isn = 0;
+ jsonlocate(initid, args, res, &reslen, &isn, error);
+ return (isn) ? 0LL : 1LL;
} // end of jsoncontains
void jsoncontains_deinit(UDF_INIT* initid)
@@ -4059,8 +4121,7 @@ my_bool jsoncontains_path_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
return JsonInit(initid, args, message, true, reslen, memlen, more);
} // end of jsoncontains_path_init
-long long jsoncontains_path(UDF_INIT *initid, UDF_ARGS *args, char *result,
- unsigned long *res_length, char *is_null, char *error)
+long long jsoncontains_path(UDF_INIT *initid, UDF_ARGS *args, char *, char *error)
{
char *p, *path;
long long n;
@@ -4071,7 +4132,6 @@ long long jsoncontains_path(UDF_INIT *initid, UDF_ARGS *args, char *result,
if (g->N) {
if (!g->Activityp) {
- *is_null = 1;
return 0LL;
} else
return *(long long*)g->Activityp;
@@ -4129,7 +4189,6 @@ long long jsoncontains_path(UDF_INIT *initid, UDF_ARGS *args, char *result,
err:
if (g->Mrr) *error = 1;
- *is_null = 1;
return 0LL;
} // end of jsoncontains_path
@@ -4406,7 +4465,8 @@ char *json_file(UDF_INIT *initid, UDF_ARGS *args, char *result,
fn = MakePSZ(g, args, 0);
if (args->arg_count > 1) {
- int len, pretty = 3, pty = 3;
+ int pretty = 3, pty = 3;
+ size_t len;
PJSON jsp;
PJVAL jvp = NULL;
@@ -4609,7 +4669,7 @@ char *jbin_array(UDF_INIT *initid, UDF_ARGS *args, char *result,
strcat(bsp->Msg, " array");
for (uint i = 0; i < args->arg_count; i++)
- arp->AddValue(g, MakeValue(g, args, i));
+ arp->AddArrayValue(g, MakeValue(g, args, i));
arp->InitArray(g);
} // endif arp && bsp
@@ -4670,7 +4730,7 @@ char *jbin_array_add_values(UDF_INIT *initid, UDF_ARGS *args, char *result,
if (jvp->GetValType() != TYPE_JAR) {
if ((arp = (PJAR)JsonNew(gb, TYPE_JAR))) {
- arp->AddValue(gb, jvp);
+ arp->AddArrayValue(gb, jvp);
top = arp;
} // endif arp
@@ -4678,7 +4738,7 @@ char *jbin_array_add_values(UDF_INIT *initid, UDF_ARGS *args, char *result,
arp = jvp->GetArray();
for (uint i = 1; i < args->arg_count; i++)
- arp->AddValue(gb, MakeValue(gb, args, i));
+ arp->AddArrayValue(gb, MakeValue(gb, args, i));
arp->InitArray(gb);
@@ -4761,7 +4821,7 @@ char *jbin_array_add(UDF_INIT *initid, UDF_ARGS *args, char *result,
if (jvp->GetValType() != TYPE_JAR) {
if ((arp = (PJAR)JsonNew(gb, TYPE_JAR))) {
- arp->AddValue(gb, (PJVAL)JvalNew(gb, TYPE_JVAL, jvp));
+ arp->AddArrayValue(gb, (PJVAL)JvalNew(gb, TYPE_JVAL, jvp));
jvp->SetValue(arp);
if (!top)
@@ -4772,7 +4832,7 @@ char *jbin_array_add(UDF_INIT *initid, UDF_ARGS *args, char *result,
} else
arp = jvp->GetArray();
- arp->AddValue(gb, MakeValue(gb, args, 1), x);
+ arp->AddArrayValue(gb, MakeValue(gb, args, 1), x);
arp->InitArray(gb);
} else {
PUSH_WARNING("First argument target is not an array");
@@ -4900,7 +4960,7 @@ char *jbin_object(UDF_INIT *initid, UDF_ARGS *args, char *result,
if ((objp = (PJOB)JsonNew(g, TYPE_JOB))) {
for (uint i = 0; i < args->arg_count; i++)
- objp->SetValue(g, MakeValue(g, args, i), MakeKey(g, args, i));
+ objp->SetKeyValue(g, MakeValue(g, args, i), MakeKey(g, args, i));
if ((bsp = JbinAlloc(g, args, initid->max_length, objp)))
@@ -4957,7 +5017,7 @@ char *jbin_object_nonull(UDF_INIT *initid, UDF_ARGS *args, char *result,
if ((objp = (PJOB)JsonNew(g, TYPE_JOB))) {
for (uint i = 0; i < args->arg_count; i++)
if (!(jvp = MakeValue(g, args, i))->IsNull())
- objp->SetValue(g, jvp, MakeKey(g, args, i));
+ objp->SetKeyValue(g, jvp, MakeKey(g, args, i));
if ((bsp = JbinAlloc(g, args, initid->max_length, objp)))
strcat(bsp->Msg, " object");
@@ -5016,7 +5076,7 @@ char *jbin_object_key(UDF_INIT *initid, UDF_ARGS *args, char *result,
if ((objp = (PJOB)JsonNew(g, TYPE_JOB))) {
for (uint i = 0; i < args->arg_count; i += 2)
- objp->SetValue(g, MakeValue(g, args, i + 1), MakePSZ(g, args, i));
+ objp->SetKeyValue(g, MakeValue(g, args, i + 1), MakePSZ(g, args, i));
if ((bsp = JbinAlloc(g, args, initid->max_length, objp)))
strcat(bsp->Msg, " object");
@@ -5094,7 +5154,7 @@ char *jbin_object_add(UDF_INIT *initid, UDF_ARGS *args, char *result,
jobp = jvp->GetObject();
jvp = MakeValue(gb, args, 1);
key = MakeKey(gb, args, 1);
- jobp->SetValue(gb, jvp, key);
+ jobp->SetKeyValue(gb, jvp, key);
} else {
PUSH_WARNING("First argument target is not an object");
// if (g->Mrr) *error = 1; (only if no path)
@@ -5313,7 +5373,7 @@ char *jbin_get_item(UDF_INIT *initid, UDF_ARGS *args, char *result,
// Get the json tree
if ((jvp = jsx->GetRowValue(g, jsp, 0, false))) {
- jsp = (jvp->GetJsp()) ? jvp->GetJsp() : JvalNew(g, TYPE_VAL, jvp->GetValue());
+ jsp = (jvp->GetJsp()) ? jvp->GetJsp() : JvalNew(g, TYPE_JVAL, jvp->GetValue(g));
if ((bsp = JbinAlloc(g, args, initid->max_length, jsp)))
strcat(bsp->Msg, " item");
@@ -5639,7 +5699,8 @@ char *jbin_file(UDF_INIT *initid, UDF_ARGS *args, char *result,
unsigned long *res_length, char *is_null, char *error)
{
char *fn;
- int pretty = 3, len = 0, pty = 3;
+ int pretty = 3, pty = 3;
+ size_t len = 0;
PJSON jsp;
PJVAL jvp = NULL;
PGLOBAL g = (PGLOBAL)initid->ptr;
@@ -5782,11 +5843,11 @@ my_bool jfile_convert_init(UDF_INIT* initid, UDF_ARGS* args, char* message) {
} // endif args
CalcLen(args, false, reslen, memlen);
- return JsonInit(initid, args, message, false, reslen, memlen);
+ return JsonInit(initid, args, message, true, reslen, memlen);
} // end of jfile_convert_init
char *jfile_convert(UDF_INIT* initid, UDF_ARGS* args, char* result,
- unsigned long *res_length, char *, char *error) {
+ unsigned long *res_length, char *is_null, char *error) {
char *str, *fn, *ofn;
int lrecl = (int)*(longlong*)args->args[2];
PGLOBAL g = (PGLOBAL)initid->ptr;
@@ -5804,10 +5865,15 @@ char *jfile_convert(UDF_INIT* initid, UDF_ARGS* args, char* result,
str = (char*)g->Xchk;
if (!str) {
- str = PlugDup(g, g->Message);
- } // endif str
+ PUSH_WARNING(g->Message ? g->Message : "Unexpected error");
+ *is_null = 1;
+ *error = 1;
+ *res_length = 0;
+ } else {
+ strcpy(result, str);
+ *res_length = strlen(str);
+ } // endif str
- *res_length = strlen(str);
return str;
} // end of jfile_convert
@@ -5815,9 +5881,136 @@ void jfile_convert_deinit(UDF_INIT* initid) {
JsonFreeMem((PGLOBAL)initid->ptr);
} // end of jfile_convert_deinit
+/*********************************************************************************/
+/* Convert a pretty JSON file to a binary (BJSON) file. */
+/*********************************************************************************/
+my_bool jfile_bjson_init(UDF_INIT* initid, UDF_ARGS* args, char* message) {
+ unsigned long reslen, memlen;
+
+ if (args->arg_count != 2 && args->arg_count != 3) {
+ strcpy(message, "This function must have 2 or 3 arguments");
+ return true;
+ } else if (args->arg_count == 3 && args->arg_type[2] != INT_RESULT) {
+ strcpy(message, "Third Argument must be an integer (LRECL)");
+ return true;
+ } else for (int i = 0; i < 2; i++)
+ if (args->arg_type[i] != STRING_RESULT) {
+ sprintf(message, "Arguments %d must be a string (file name)", i + 1);
+ return true;
+ } // endif args
+
+ CalcLen(args, false, reslen, memlen);
+ memlen = memlen * M;
+ memlen += (args->arg_count == 3) ? (ulong)*(longlong*)args->args[2] : 1024;
+ return JsonInit(initid, args, message, false, reslen, memlen);
+} // end of jfile_bjson_init
+
+char *jfile_bjson(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char*, char *error) {
+ char *fn, *ofn, *buf, *str = NULL;
+ bool loop;
+ ssize_t len, newloc;
+ size_t lrecl, *binszp;
+ PJSON jsp;
+ SWAP *swp;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ PlugSubSet(g->Sarea, g->Sarea_Size);
+ fn = MakePSZ(g, args, 0);
+ ofn = MakePSZ(g, args, 1);
+
+ if (args->arg_count == 3)
+ lrecl = (size_t)*(longlong*)args->args[2];
+ else
+ lrecl = 1024;
+
+ if (!g->Xchk) {
+ int msgid = MSGID_OPEN_MODE_STRERROR;
+ FILE *fout;
+ FILE *fin;
+
+ if (!(fin = global_fopen(g, msgid, fn, "rt")))
+ str = strcpy(result, g->Message);
+ else if (!(fout = global_fopen(g, msgid, ofn, "wb")))
+ str = strcpy(result, g->Message);
+ else if ((buf = (char*)PlgDBSubAlloc(g, NULL, lrecl)) &&
+ (binszp = (size_t*)PlgDBSubAlloc(g, NULL, sizeof(size_t)))) {
+ JsonMemSave(g);
+
+ try {
+ do {
+ loop = false;
+ JsonSubSet(g);
+
+ if (!fgets(buf, lrecl, fin)) {
+ if (!feof(fin)) {
+ sprintf(g->Message, "Error %d reading %zd bytes from %s", errno, lrecl, fn);
+ str = strcpy(result, g->Message);
+ } else
+ str = strcpy(result, ofn);
+
+ } else if ((len = strlen(buf))) {
+ if ((jsp = ParseJson(g, buf, len))) {
+ newloc = (size_t)PlugSubAlloc(g, NULL, 0);
+ *binszp = newloc - (size_t)jsp;
+
+ swp = new(g) SWAP(g, jsp);
+ swp->SwapJson(jsp, true);
+
+ if (fwrite(binszp, sizeof(binszp), 1, fout) != 1) {
+ sprintf(g->Message, "Error %d writing %zd bytes to %s",
+ errno, sizeof(binszp), ofn);
+ str = strcpy(result, g->Message);
+ } else if (fwrite(jsp, *binszp, 1, fout) != 1) {
+ sprintf(g->Message, "Error %d writing %zd bytes to %s",
+ errno, *binszp, ofn);
+ str = strcpy(result, g->Message);
+ } else
+ loop = true;
+
+ } else {
+ str = strcpy(result, g->Message);
+ } // endif jsp
+
+ } else
+ loop = true;
+
+ } while (loop);
+
+ } catch (int) {
+ str = strcpy(result, g->Message);
+ } catch (const char* msg) {
+ str = strcpy(result, msg);
+ } // end catch
+
+ } else
+ str = strcpy(result, g->Message);
+
+ if (fin) fclose(fin);
+ if (fout) fclose(fout);
+ g->Xchk = str;
+ } else
+ str = (char*)g->Xchk;
+
+ if (!str) {
+ if (g->Message)
+ str = strcpy(result, g->Message);
+ else
+ str = strcpy(result, "Unexpected error");
+
+ } // endif str
+
+ *res_length = strlen(str);
+ return str;
+} // end of jfile_bjson
+
+void jfile_bjson_deinit(UDF_INIT* initid) {
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of jfile_bjson_deinit
+
/* --------------------------------- Class JUP --------------------------------- */
-#define ARGS MY_MIN(24,len-i),s+MY_MAX(i-3,0)
+#define ARGS MY_MIN(24,(int)len-i),s+MY_MAX(i-3,0)
/*********************************************************************************/
/* JUP public constructor. */
@@ -5825,7 +6018,9 @@ void jfile_convert_deinit(UDF_INIT* initid) {
JUP::JUP(PGLOBAL g) {
fs = NULL;
s = buff = NULL;
- i = k = len = recl = 0;
+ len = 0;
+ k = recl = 0;
+ i = 0;
} // end of JUP constructor
/*********************************************************************************/
@@ -5853,11 +6048,16 @@ char* JUP::UnprettyJsonFile(PGLOBAL g, char *fn, char *outfn, int lrecl) {
/*******************************************************************************/
/* Get the file size (assuming file is smaller than 4 GB) */
/*******************************************************************************/
- if (!mm.lenL) { // Empty or deleted file
+ if (!mm.lenL && !mm.lenH) { // Empty or deleted file
CloseFileHandle(hFile);
return NULL;
- } else
- len = (int)mm.lenL;
+ } else {
+ len = (size_t)mm.lenL;
+
+ if (mm.lenH)
+ len += ((size_t)mm.lenH * 0x000000001LL);
+
+ } // endif size
if (!mm.memory) {
CloseFileHandle(hFile);
@@ -5875,7 +6075,7 @@ char* JUP::UnprettyJsonFile(PGLOBAL g, char *fn, char *outfn, int lrecl) {
sprintf(g->Message, MSG(OPEN_MODE_ERROR),
"w", (int)errno, outfn);
strcat(strcat(g->Message, ": "), strerror(errno));
- CloseMemMap(mm.memory, (size_t)mm.lenL);
+ CloseMemMap(mm.memory, len);
return NULL;
} // endif fs
@@ -5884,7 +6084,7 @@ char* JUP::UnprettyJsonFile(PGLOBAL g, char *fn, char *outfn, int lrecl) {
if (!unPretty(g, lrecl))
ret = outfn;
- CloseMemMap(mm.memory, (size_t)mm.lenL);
+ CloseMemMap(mm.memory, len);
fclose(fs);
return ret;
} // end of UnprettyJsonFile
@@ -6329,8 +6529,7 @@ my_bool countin_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
return false;
} // end of countin_init
-long long countin(UDF_INIT *initid, UDF_ARGS *args, char *result,
- unsigned long *res_length, char *is_null, char *)
+long long countin(UDF_INIT *initid, UDF_ARGS *args, char *is_null, char *)
{
PSZ str1, str2;
char *s;
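A minimal usage sketch for the jfile_bjson UDF introduced above, assuming the CONNECT UDFs are registered from the ha_connect plugin library on a Linux build (the library name and the output file name below are illustrative, not taken from the patch); on success the function returns the output file name, otherwise the message built in g->Message:
CREATE FUNCTION jfile_bjson RETURNS STRING SONAME 'ha_connect.so';
SELECT jfile_bjson('biblio.json', 'biblio.bjson', 320);
DROP FUNCTION jfile_bjson;
The optional third argument is the LRECL used as the input record buffer size; when it is omitted, jfile_bjson_init above defaults it to 1024.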
diff --git a/storage/connect/jsonudf.h b/storage/connect/jsonudf.h
index 897b0fe9919..689a02ebbc5 100644
--- a/storage/connect/jsonudf.h
+++ b/storage/connect/jsonudf.h
@@ -1,10 +1,11 @@
/******************** tabjson H Declares Source Code File (.H) *******************/
-/* Name: jsonudf.h Version 1.3 */
+/* Name: jsonudf.h Version 1.4 */
/* */
-/* (C) Copyright to the author Olivier BERTRAND 2015-2017 */
+/* (C) Copyright to the author Olivier BERTRAND 2015-2020 */
/* */
/* This file contains the JSON UDF function and class declares. */
/*********************************************************************************/
+#pragma once
#include "global.h"
#include "plgdbsem.h"
#include "block.h"
@@ -15,6 +16,27 @@
#define UDF_EXEC_ARGS \
UDF_INIT*, UDF_ARGS*, char*, unsigned long*, char*, char*
+// BSON size should be equal on Linux and Windows
+#define BMX 255
+typedef struct BSON* PBSON;
+
+/***********************************************************************/
+/* Structure used to return binary json to Json UDF functions. */
+/***********************************************************************/
+struct BSON {
+ char Msg[BMX + 1];
+ char *Filename;
+ PGLOBAL G;
+ int Pretty;
+ ulong Reslen;
+ my_bool Changed;
+ PJSON Top;
+ PJSON Jsp;
+ PBSON Bsp;
+}; // end of struct BSON
+
+PBSON JbinAlloc(PGLOBAL g, UDF_ARGS* args, ulong len, PJSON jsp);
+
/*********************************************************************************/
/* The JSON tree node. Can be an Object or an Array. */
/*********************************************************************************/
@@ -29,9 +51,29 @@ typedef struct _jnode {
} JNODE, *PJNODE;
typedef class JSNX *PJSNX;
-typedef class JOUTPATH *PJTP;
-typedef class JOUTALL *PJTA;
+/*********************************************************************************/
+/* The JSON utility functions. */
+/*********************************************************************************/
+bool IsNum(PSZ s);
+char *NextChr(PSZ s, char sep);
+char *GetJsonNull(void);
+uint GetJsonGrpSize(void);
+my_bool JsonSubSet(PGLOBAL g, my_bool b = false);
+my_bool CalcLen(UDF_ARGS* args, my_bool obj, unsigned long& reslen,
+ unsigned long& memlen, my_bool mod = false);
+my_bool JsonInit(UDF_INIT* initid, UDF_ARGS* args, char* message, my_bool mbn,
+ unsigned long reslen, unsigned long memlen,
+ unsigned long more = 0);
+my_bool CheckMemory(PGLOBAL g, UDF_INIT* initid, UDF_ARGS* args, uint n,
+ my_bool m, my_bool obj = false, my_bool mod = false);
+PSZ MakePSZ(PGLOBAL g, UDF_ARGS* args, int i);
+int IsJson(UDF_ARGS* args, uint i, bool b = false);
+char *GetJsonFile(PGLOBAL g, char* fn);
+
+/*********************************************************************************/
+/* The JSON UDF functions. */
+/*********************************************************************************/
extern "C" {
DllExport my_bool jsonvalue_init(UDF_INIT*, UDF_ARGS*, char*);
DllExport char *jsonvalue(UDF_EXEC_ARGS);
@@ -132,7 +174,7 @@ extern "C" {
DllExport void jsonget_real_deinit(UDF_INIT*);
DllExport my_bool jsoncontains_init(UDF_INIT*, UDF_ARGS*, char*);
- DllExport long long jsoncontains(UDF_EXEC_ARGS);
+ DllExport long long jsoncontains(UDF_INIT*, UDF_ARGS*, char*, char*);
DllExport void jsoncontains_deinit(UDF_INIT*);
DllExport my_bool jsonlocate_init(UDF_INIT*, UDF_ARGS*, char*);
@@ -144,7 +186,7 @@ extern "C" {
DllExport void json_locate_all_deinit(UDF_INIT*);
DllExport my_bool jsoncontains_path_init(UDF_INIT*, UDF_ARGS*, char*);
- DllExport long long jsoncontains_path(UDF_EXEC_ARGS);
+ DllExport long long jsoncontains_path(UDF_INIT*, UDF_ARGS*, char*, char*);
DllExport void jsoncontains_path_deinit(UDF_INIT*);
DllExport my_bool json_set_item_init(UDF_INIT*, UDF_ARGS*, char*);
@@ -239,6 +281,10 @@ extern "C" {
DllExport char* jfile_convert(UDF_EXEC_ARGS);
DllExport void jfile_convert_deinit(UDF_INIT*);
+ DllExport my_bool jfile_bjson_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char* jfile_bjson(UDF_EXEC_ARGS);
+ DllExport void jfile_bjson_deinit(UDF_INIT*);
+
DllExport my_bool envar_init(UDF_INIT*, UDF_ARGS*, char*);
DllExport char *envar(UDF_EXEC_ARGS);
@@ -248,17 +294,17 @@ extern "C" {
#endif // DEVELOPMENT
DllExport my_bool countin_init(UDF_INIT*, UDF_ARGS*, char*);
- DllExport long long countin(UDF_EXEC_ARGS);
-} // extern "C"
+ DllExport long long countin(UDF_INIT*, UDF_ARGS*, char*, char*);
+} // extern "C"
/*********************************************************************************/
/* Structure JPN. Used to make the locate path. */
/*********************************************************************************/
typedef struct _jpn {
- enum JTYP Type;
- PCSZ Key;
- int N;
+ int Type;
+ PCSZ Key;
+ int N;
} JPN, *PJPN;
/*********************************************************************************/
@@ -290,15 +336,16 @@ protected:
PVAL ExpandArray(PGLOBAL g, PJAR arp, int n);
PVAL CalculateArray(PGLOBAL g, PJAR arp, int n);
PVAL MakeJson(PGLOBAL g, PJSON jsp);
- void SetJsonValue(PGLOBAL g, PVAL vp, PJVAL val, int n);
+ void SetJsonValue(PGLOBAL g, PVAL vp, PJVAL val);
PJSON GetRow(PGLOBAL g);
- my_bool LocateArray(PJAR jarp);
- my_bool LocateObject(PJOB jobp);
- my_bool LocateValue(PJVAL jvp);
- my_bool LocateArrayAll(PJAR jarp);
- my_bool LocateObjectAll(PJOB jobp);
- my_bool LocateValueAll(PJVAL jvp);
- my_bool CompareTree(PJSON jp1, PJSON jp2);
+ my_bool CompareValues(PJVAL v1, PJVAL v2);
+ my_bool LocateArray(PGLOBAL g, PJAR jarp);
+ my_bool LocateObject(PGLOBAL g, PJOB jobp);
+ my_bool LocateValue(PGLOBAL g, PJVAL jvp);
+ my_bool LocateArrayAll(PGLOBAL g, PJAR jarp);
+ my_bool LocateObjectAll(PGLOBAL g, PJOB jobp);
+ my_bool LocateValueAll(PGLOBAL g, PJVAL jvp);
+ my_bool CompareTree(PGLOBAL g, PJSON jp1, PJSON jp2);
my_bool AddPath(void);
// Default constructor not to be used
@@ -355,11 +402,10 @@ public:
void CopyNumeric(PGLOBAL g);
// Members
- FILE* fs;
- char* s;
- char* buff;
- int len;
- int recl;
- int i, k;
+ FILE *fs;
+ char *s;
+ char *buff;
+ size_t len;
+ uint i;
+ int k, recl;
}; // end of class JUP
-
diff --git a/storage/connect/libdoc.cpp b/storage/connect/libdoc.cpp
index 69bbe980eba..61921555ad7 100644
--- a/storage/connect/libdoc.cpp
+++ b/storage/connect/libdoc.cpp
@@ -378,7 +378,7 @@ bool LIBXMLDOC::Initialize(PGLOBAL g, PCSZ entry, bool zipped)
if (zipped && InitZip(g, entry))
return true;
- int n __attribute__((unused))= xmlKeepBlanksDefault(1);
+ xmlKeepBlanksDefault(1);
return MakeNSlist(g);
} // end of Initialize
diff --git a/storage/connect/mycat.cc b/storage/connect/mycat.cc
index f8b3dc03aa5..e3fa00e119f 100644
--- a/storage/connect/mycat.cc
+++ b/storage/connect/mycat.cc
@@ -16,9 +16,9 @@
/*************** Mycat CC Program Source Code File (.CC) ***************/
/* PROGRAM NAME: MYCAT */
/* ------------- */
-/* Version 1.7 */
+/* Version 1.8 */
/* */
-/* Author: Olivier Bertrand 2012 - 2019 */
+/* Author: Olivier Bertrand 2012 - 2020 */
/* */
/* WHAT THIS PROGRAM DOES: */
/* ----------------------- */
@@ -82,7 +82,11 @@
#endif // JAVA_SUPPORT
#include "tabpivot.h"
#include "tabvir.h"
+#if defined(BSON_SUPPORT)
+#include "tabbson.h"
+#else
#include "tabjson.h"
+#endif // BSON_SUPPORT
#include "ha_connect.h"
#if defined(XML_SUPPORT)
#include "tabxml.h"
@@ -107,6 +111,9 @@ extern "C" HINSTANCE s_hModule; // Saved module handle
#if defined(JAVA_SUPPORT) || defined(CMGO_SUPPORT)
bool MongoEnabled(void);
#endif // JAVA_SUPPORT || CMGO_SUPPORT
+#if defined(BSON_SUPPORT)
+bool Force_Bson(void);
+#endif // BSON_SUPPORT
/***********************************************************************/
/* Get the plugin directory. */
@@ -130,25 +137,25 @@ TABTYPE GetTypeID(const char *type)
: (!stricmp(type, "DBF")) ? TAB_DBF
#if defined(XML_SUPPORT)
: (!stricmp(type, "XML")) ? TAB_XML
-#endif
+#endif // XML_SUPPORT
: (!stricmp(type, "INI")) ? TAB_INI
: (!stricmp(type, "VEC")) ? TAB_VEC
#if defined(ODBC_SUPPORT)
: (!stricmp(type, "ODBC")) ? TAB_ODBC
-#endif
+#endif // ODBC_SUPPORT
#if defined(JAVA_SUPPORT)
: (!stricmp(type, "JDBC")) ? TAB_JDBC
-#endif
+#endif // JAVA_SUPPORT
#if defined(JAVA_SUPPORT) || defined(CMGO_SUPPORT)
: (!stricmp(type, "MONGO") && MongoEnabled()) ? TAB_MONGO
-#endif
+#endif // JAVA_SUPPORT || CMGO_SUPPORT
: (!stricmp(type, "MYSQL")) ? TAB_MYSQL
: (!stricmp(type, "MYPRX")) ? TAB_MYSQL
: (!stricmp(type, "DIR")) ? TAB_DIR
#if defined(__WIN__)
: (!stricmp(type, "MAC")) ? TAB_MAC
: (!stricmp(type, "WMI")) ? TAB_WMI
-#endif
+#endif // __WIN__
: (!stricmp(type, "TBL")) ? TAB_TBL
: (!stricmp(type, "XCOL")) ? TAB_XCL
: (!stricmp(type, "OCCUR")) ? TAB_OCCUR
@@ -157,9 +164,12 @@ TABTYPE GetTypeID(const char *type)
: (!stricmp(type, "PIVOT")) ? TAB_PIVOT
: (!stricmp(type, "VIR")) ? TAB_VIR
: (!stricmp(type, "JSON")) ? TAB_JSON
+#if defined(BSON_SUPPORT)
+ : (!stricmp(type, "BSON")) ? TAB_BSON
+#endif // BSON_SUPPORT
#if defined(ZIP_SUPPORT)
: (!stricmp(type, "ZIP")) ? TAB_ZIP
-#endif
+#endif // ZIP_SUPPORT
: (!stricmp(type, "OEM")) ? TAB_OEM : TAB_NIY;
} // end of GetTypeID
@@ -181,6 +191,9 @@ bool IsFileType(TABTYPE type)
case TAB_INI:
case TAB_VEC:
case TAB_JSON:
+#if defined(BSON_SUPPORT)
+ case TAB_BSON:
+#endif // BSON_SUPPORT
case TAB_REST:
// case TAB_ZIP:
isfile= true;
@@ -276,6 +289,9 @@ bool IsTypeIndexable(TABTYPE type)
case TAB_VEC:
case TAB_DBF:
case TAB_JSON:
+#if defined(BSON_SUPPORT)
+ case TAB_BSON:
+#endif // BSON_SUPPORT
idx= true;
break;
default:
@@ -302,6 +318,9 @@ int GetIndexType(TABTYPE type)
case TAB_VEC:
case TAB_DBF:
case TAB_JSON:
+#if defined(BSON_SUPPORT)
+ case TAB_BSON:
+#endif // BSON_SUPPORT
xtyp= 1;
break;
case TAB_MYSQL:
@@ -445,7 +464,7 @@ PTABDEF MYCAT::MakeTableDesc(PGLOBAL g, PTABLE tablep, LPCSTR am)
case TAB_XML: tdp= new(g) XMLDEF; break;
#endif // XML_SUPPORT
#if defined(VCT_SUPPORT)
- case TAB_VEC: tdp = new(g) VCTDEF; break;
+ case TAB_VEC: tdp= new(g) VCTDEF; break;
#endif // VCT_SUPPORT
#if defined(ODBC_SUPPORT)
case TAB_ODBC: tdp= new(g) ODBCDEF; break;
@@ -465,9 +484,20 @@ PTABDEF MYCAT::MakeTableDesc(PGLOBAL g, PTABLE tablep, LPCSTR am)
case TAB_MYSQL: tdp= new(g) MYSQLDEF; break;
case TAB_PIVOT: tdp= new(g) PIVOTDEF; break;
case TAB_VIR: tdp= new(g) VIRDEF; break;
- case TAB_JSON: tdp= new(g) JSONDEF; break;
+ case TAB_JSON:
+#if defined(BSON_SUPPORT)
+ if (Force_Bson())
+ tdp= new(g) BSONDEF;
+ else
+#endif // BSON_SUPPORT
+ tdp= new(g) JSONDEF;
+
+ break;
+#if defined(BSON_SUPPORT)
+ case TAB_BSON: tdp= new(g) BSONDEF; break;
+#endif // BSON_SUPPORT
#if defined(ZIP_SUPPORT)
- case TAB_ZIP: tdp = new(g) ZIPDEF; break;
+ case TAB_ZIP: tdp= new(g) ZIPDEF; break;
#endif // ZIP_SUPPORT
#if defined(REST_SUPPORT)
case TAB_REST: tdp= new (g) RESTDEF; break;
diff --git a/storage/connect/mysql-test/connect/disabled.def b/storage/connect/mysql-test/connect/disabled.def
index a4d629fc3d1..5107de7a930 100644
--- a/storage/connect/mysql-test/connect/disabled.def
+++ b/storage/connect/mysql-test/connect/disabled.def
@@ -16,9 +16,12 @@ jdbc_postgresql : Variable settings depend on machine configuration
json_mongo_c : Need MongoDB running and its C Driver installed
json_java_2 : Need MongoDB running and its Java Driver installed
json_java_3 : Need MongoDB running and its Java Driver installed
+bson_mongo_c : Need MongoDB running and its C Driver installed
+bson_java_2 : Need MongoDB running and its Java Driver installed
+bson_java_3 : Need MongoDB running and its Java Driver installed
mongo_c : Need MongoDB running and its C Driver installed
mongo_java_2 : Need MongoDB running and its Java Driver installed
mongo_java_3 : Need MongoDB running and its Java Driver installed
tbl_thread : Bug MDEV-9844,10179,14214 03/01/2018 OB Option THREAD removed
-grant2 : Until fixed
+#bson : Development
#vcol : Different error code on different versions
diff --git a/storage/connect/mysql-test/connect/r/alter_xml.result b/storage/connect/mysql-test/connect/r/alter_xml.result
index 7cdb1e5d21c..d2f882f1287 100644
--- a/storage/connect/mysql-test/connect/r/alter_xml.result
+++ b/storage/connect/mysql-test/connect/r/alter_xml.result
@@ -54,7 +54,7 @@ line
</t1>
# NOTE: The first (ignored) row is due to the remaining HEADER=1 option.
# Testing field option modification
-ALTER TABLE t1 MODIFY d CHAR(10) NOT NULL FIELD_FORMAT='@', HEADER=0;
+ALTER TABLE t1 MODIFY d CHAR(10) NOT NULL XPATH='@', HEADER=0;
SELECT * FROM t1;
c d
1 One
@@ -64,7 +64,7 @@ SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`c` int(11) NOT NULL,
- `d` char(10) NOT NULL `FIELD_FORMAT`='@'
+ `d` char(10) NOT NULL `XPATH`='@'
) ENGINE=CONNECT DEFAULT CHARSET=latin1 `QUOTED`=1 `TABLE_TYPE`=XML `TABNAME`=t1 `OPTION_LIST`='xmlsup=domdoc,rownode=row' `HEADER`=0
SELECT * FROM t2;
line
diff --git a/storage/connect/mysql-test/connect/r/alter_xml2.result b/storage/connect/mysql-test/connect/r/alter_xml2.result
index 8eb56e3dcc3..a15be966aa8 100644
--- a/storage/connect/mysql-test/connect/r/alter_xml2.result
+++ b/storage/connect/mysql-test/connect/r/alter_xml2.result
@@ -56,7 +56,7 @@ line
</t1>
# NOTE: The first (ignored) row is due to the remaining HEADER=1 option.
# Testing field option modification
-ALTER TABLE t1 MODIFY d CHAR(10) NOT NULL FIELD_FORMAT='@', HEADER=0;
+ALTER TABLE t1 MODIFY d CHAR(10) NOT NULL XPATH='@', HEADER=0;
SELECT * FROM t1;
c d
1 One
@@ -66,7 +66,7 @@ SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`c` int(11) NOT NULL,
- `d` char(10) NOT NULL `FIELD_FORMAT`='@'
+ `d` char(10) NOT NULL `XPATH`='@'
) ENGINE=CONNECT DEFAULT CHARSET=latin1 `QUOTED`=1 `TABLE_TYPE`=XML `TABNAME`=t1 `OPTION_LIST`='xmlsup=libxml2,rownode=row' `HEADER`=0
SELECT * FROM t2;
line
diff --git a/storage/connect/mysql-test/connect/r/bson.result b/storage/connect/mysql-test/connect/r/bson.result
new file mode 100644
index 00000000000..fd15e020aac
--- /dev/null
+++ b/storage/connect/mysql-test/connect/r/bson.result
@@ -0,0 +1,517 @@
+#
+# Testing doc samples
+#
+CREATE TABLE t1
+(
+ISBN CHAR(15),
+LANG CHAR(2),
+SUBJECT CHAR(32),
+AUTHOR CHAR(64),
+TITLE CHAR(32),
+TRANSLATION CHAR(32),
+TRANSLATOR CHAR(80),
+PUBLISHER CHAR(32),
+DATEPUB int(4)
+) ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='biblio.json';
+SELECT * FROM t1;
+ISBN LANG SUBJECT AUTHOR TITLE TRANSLATION TRANSLATOR PUBLISHER DATEPUB
+9782212090819 fr applications Jean-Christophe Bernadac, François Knab Construire une application XML NULL NULL Eyrolles Paris 1999
+9782840825685 fr applications William J. Pardi XML en Action adapté de l'anglais par James Guerin Microsoft Press Paris 1999
+DROP TABLE t1;
+#
+# Testing Jpath. Get the number of authors
+#
+CREATE TABLE t1
+(
+ISBN CHAR(15),
+Language CHAR(2) JPATH='$.LANG',
+Subject CHAR(32) JPATH='$.SUBJECT',
+Authors INT(2) JPATH='$.AUTHOR[#]',
+Title CHAR(32) JPATH='$.TITLE',
+Translation CHAR(32) JPATH='$.TRANSLATION',
+Translator CHAR(80) JPATH='$.TRANSLATOR',
+Publisher CHAR(20) JPATH='$.PUBLISHER.NAME',
+Location CHAR(16) JPATH='$.PUBLISHER.PLACE',
+Year int(4) JPATH='$.DATEPUB'
+)
+ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='biblio.json';
+SELECT * FROM t1;
+ISBN Language Subject Authors Title Translation Translator Publisher Location Year
+9782212090819 fr applications 2 Construire une application XML NULL NULL Eyrolles Paris 1999
+9782840825685 fr applications 1 XML en Action adapté de l'anglais par James Guerin Microsoft Press Paris 1999
+DROP TABLE t1;
+#
+# Concatenates the authors
+#
+CREATE TABLE t1
+(
+ISBN CHAR(15),
+Language CHAR(2) JPATH='$.LANG',
+Subject CHAR(32) JPATH='$.SUBJECT',
+AuthorFN CHAR(128) JPATH='$.AUTHOR[" and "].FIRSTNAME',
+AuthorLN CHAR(128) JPATH='$.AUTHOR[" and "].LASTNAME',
+Title CHAR(32) JPATH='$.TITLE',
+Translation CHAR(32) JPATH='$.TRANSLATION',
+Translator CHAR(80) JPATH='$.TRANSLATOR',
+Publisher CHAR(20) JPATH='$.PUBLISHER.NAME',
+Location CHAR(16) JPATH='$.PUBLISHER.PLACE',
+Year int(4) JPATH='$.DATEPUB'
+)
+ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='biblio.json';
+SELECT * FROM t1;
+ISBN Language Subject AuthorFN AuthorLN Title Translation Translator Publisher Location Year
+9782212090819 fr applications Jean-Christophe and François Bernadac and Knab Construire une application XML NULL NULL Eyrolles Paris 1999
+9782840825685 fr applications William J. Pardi XML en Action adapté de l'anglais par James Guerin Microsoft Press Paris 1999
+DROP TABLE t1;
+#
+# Testing expanding authors
+#
+CREATE TABLE t1
+(
+ISBN CHAR(15),
+Language CHAR(2) JPATH='$.LANG',
+Subject CHAR(32) JPATH='$.SUBJECT',
+AuthorFN CHAR(128) JPATH='$.AUTHOR[*].FIRSTNAME',
+AuthorLN CHAR(128) JPATH='$.AUTHOR[*].LASTNAME',
+Title CHAR(32) JPATH='$.TITLE',
+Translation CHAR(32) JPATH='$.TRANSLATION',
+Translator CHAR(80) JPATH='$.TRANSLATOR',
+Publisher CHAR(20) JPATH='$.PUBLISHER.NAME',
+Location CHAR(16) JPATH='$.PUBLISHER.PLACE',
+Year int(4) JPATH='$.DATEPUB'
+)
+ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='biblio.json';
+SELECT * FROM t1;
+ISBN Language Subject AuthorFN AuthorLN Title Translation Translator Publisher Location Year
+9782212090819 fr applications Jean-Christophe Bernadac Construire une application XML NULL NULL Eyrolles Paris 1999
+9782212090819 fr applications François Knab Construire une application XML NULL NULL Eyrolles Paris 1999
+9782840825685 fr applications William J. Pardi XML en Action adapté de l'anglais par James Guerin Microsoft Press Paris 1999
+UPDATE t1 SET AuthorFN = 'Philippe' WHERE AuthorLN = 'Knab';
+SELECT * FROM t1 WHERE ISBN = '9782212090819';
+ISBN Language Subject AuthorFN AuthorLN Title Translation Translator Publisher Location Year
+9782212090819 fr applications Jean-Christophe Bernadac Construire une application XML NULL NULL Eyrolles Paris 1999
+9782212090819 fr applications Philippe Knab Construire une application XML NULL NULL Eyrolles Paris 1999
+#
+# To add an author a new table must be created
+#
+CREATE TABLE t2 (
+FIRSTNAME CHAR(32),
+LASTNAME CHAR(32))
+ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='biblio.json' OPTION_LIST='Object=$[1].AUTHOR';
+SELECT * FROM t2;
+FIRSTNAME LASTNAME
+William J. Pardi
+INSERT INTO t2 VALUES('Charles','Dickens');
+SELECT * FROM t1;
+ISBN Language Subject AuthorFN AuthorLN Title Translation Translator Publisher Location Year
+9782212090819 fr applications Jean-Christophe Bernadac Construire une application XML NULL NULL Eyrolles Paris 1999
+9782212090819 fr applications Philippe Knab Construire une application XML NULL NULL Eyrolles Paris 1999
+9782840825685 fr applications William J. Pardi XML en Action adapté de l'anglais par James Guerin Microsoft Press Paris 1999
+9782840825685 fr applications Charles Dickens XML en Action adapté de l'anglais par James Guerin Microsoft Press Paris 1999
+DROP TABLE t1;
+DROP TABLE t2;
+#
+# Check the biblio file has the good format
+#
+CREATE TABLE t1
+(
+line char(255)
+)
+ENGINE=CONNECT TABLE_TYPE=DOS FILE_NAME='biblio.json';
+SELECT * FROM t1;
+line
+[
+ {
+ "ISBN": "9782212090819",
+ "LANG": "fr",
+ "SUBJECT": "applications",
+ "AUTHOR": [
+ {
+ "FIRSTNAME": "Jean-Christophe",
+ "LASTNAME": "Bernadac"
+ },
+ {
+ "FIRSTNAME": "Philippe",
+ "LASTNAME": "Knab"
+ }
+ ],
+ "TITLE": "Construire une application XML",
+ "PUBLISHER": {
+ "NAME": "Eyrolles",
+ "PLACE": "Paris"
+ },
+ "DATEPUB": 1999
+ },
+ {
+ "ISBN": "9782840825685",
+ "LANG": "fr",
+ "SUBJECT": "applications",
+ "AUTHOR": [
+ {
+ "FIRSTNAME": "William J.",
+ "LASTNAME": "Pardi"
+ },
+ {
+ "FIRSTNAME": "Charles",
+ "LASTNAME": "Dickens"
+ }
+ ],
+ "TITLE": "XML en Action",
+ "TRANSLATION": "adapté de l'anglais par",
+ "TRANSLATOR": {
+ "FIRSTNAME": "James",
+ "LASTNAME": "Guerin"
+ },
+ "PUBLISHER": {
+ "NAME": "Microsoft Press",
+ "PLACE": "Paris"
+ },
+ "DATEPUB": 1999
+ }
+]
+DROP TABLE t1;
+#
+# Testing a pretty=0 file
+#
+CREATE TABLE t1
+(
+ISBN CHAR(15) NOT NULL,
+Language CHAR(2) JPATH='$.LANG',
+Subject CHAR(32) JPATH='$.SUBJECT',
+AuthorFN CHAR(128) JPATH='$.AUTHOR[*].FIRSTNAME',
+AuthorLN CHAR(128) JPATH='$.AUTHOR[*].LASTNAME',
+Title CHAR(32) JPATH='$.TITLE',
+Translation CHAR(32) JPATH='$.TRANSLATED.PREFIX',
+TranslatorFN CHAR(80) JPATH='$.TRANSLATED.TRANSLATOR.FIRSTNAME',
+TranslatorLN CHAR(80) JPATH='$.TRANSLATED.TRANSLATOR.LASTNAME',
+Publisher CHAR(20) JPATH='$.PUBLISHER.NAME',
+Location CHAR(16) JPATH='$.PUBLISHER.PLACE',
+Year int(4) JPATH='$.DATEPUB',
+INDEX IX(ISBN)
+)
+ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='bib0.json' LRECL=320 OPTION_LIST='Pretty=0';
+SHOW INDEX FROM t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 1 IX 1 ISBN A NULL NULL NULL XINDEX
+SELECT * FROM t1;
+ISBN Language Subject AuthorFN AuthorLN Title Translation TranslatorFN TranslatorLN Publisher Location Year
+9782212090819 fr applications Jean-Michel Bernadac Construire une application XML NULL NULL NULL Eyrolles Paris 1999
+9782212090819 fr applications François Knab Construire une application XML NULL NULL NULL Eyrolles Paris 1999
+9782840825685 fr applications William J. Pardi XML en Action adapté de l'anglais par James Guerin Microsoft Press Paris 2001
+DESCRIBE SELECT * FROM t1 WHERE ISBN = '9782212090819';
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ref IX IX 15 const 1 Using where
+UPDATE t1 SET AuthorFN = 'Philippe' WHERE ISBN = '9782212090819';
+ERROR HY000: Got error 122 'Cannot write expanded column when Pretty is not 2' from CONNECT
+DROP TABLE t1;
+#
+# A file with 2 arrays
+#
+CREATE TABLE t1 (
+WHO CHAR(12),
+WEEK INT(2) JPATH='$.WEEK[*].NUMBER',
+WHAT CHAR(32) JPATH='$.WEEK[].EXPENSE["+"].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.WEEK[].EXPENSE[+].AMOUNT')
+ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='expense.json';
+SELECT * FROM t1;
+WHO WEEK WHAT AMOUNT
+Joe 3 Beer+Food+Food+Car 69.00
+Joe 4 Beer+Beer+Food+Food+Beer 83.00
+Joe 5 Beer+Food 26.00
+Beth 3 Beer 16.00
+Beth 4 Food+Beer 32.00
+Beth 5 Food+Beer 32.00
+Janet 3 Car+Food+Beer 55.00
+Janet 4 Car 17.00
+Janet 5 Beer+Car+Beer+Food 57.00
+DROP TABLE t1;
+#
+# Now it can be fully expanded
+#
+CREATE TABLE t1 (
+WHO CHAR(12),
+WEEK INT(2) JPATH='$.WEEK[*].NUMBER',
+WHAT CHAR(32) JPATH='$.WEEK[*].EXPENSE[*].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.WEEK[*].EXPENSE[*].AMOUNT')
+ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='expense.json';
+SELECT * FROM t1;
+WHO WEEK WHAT AMOUNT
+Joe 3 Beer 18.00
+Joe 3 Food 12.00
+Joe 3 Food 19.00
+Joe 3 Car 20.00
+Joe 4 Beer 19.00
+Joe 4 Beer 16.00
+Joe 4 Food 17.00
+Joe 4 Food 17.00
+Joe 4 Beer 14.00
+Joe 5 Beer 14.00
+Joe 5 Food 12.00
+Beth 3 Beer 16.00
+Beth 4 Food 17.00
+Beth 4 Beer 15.00
+Beth 5 Food 12.00
+Beth 5 Beer 20.00
+Janet 3 Car 19.00
+Janet 3 Food 18.00
+Janet 3 Beer 18.00
+Janet 4 Car 17.00
+Janet 5 Beer 14.00
+Janet 5 Car 12.00
+Janet 5 Beer 19.00
+Janet 5 Food 12.00
+DROP TABLE t1;
+#
+# A table showing many calculated results
+#
+CREATE TABLE t1 (
+WHO CHAR(12) NOT NULL,
+WEEKS CHAR(12) NOT NULL JPATH='$.WEEK[", "].NUMBER',
+SUMS CHAR(64) NOT NULL JPATH='$.WEEK["+"].EXPENSE[+].AMOUNT',
+SUM DOUBLE(8,2) NOT NULL JPATH='$.WEEK[+].EXPENSE[+].AMOUNT',
+AVGS CHAR(64) NOT NULL JPATH='$.WEEK["+"].EXPENSE[!].AMOUNT',
+SUMAVG DOUBLE(8,2) NOT NULL JPATH='$.WEEK[+].EXPENSE[!].AMOUNT',
+AVGSUM DOUBLE(8,2) NOT NULL JPATH='$.WEEK[!].EXPENSE[+].AMOUNT',
+AVGAVG DOUBLE(8,2) NOT NULL JPATH='$.WEEK[!].EXPENSE[!].AMOUNT',
+AVERAGE DOUBLE(8,2) NOT NULL JPATH='$.WEEK[!].EXPENSE[*].AMOUNT')
+ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='expense.json';
+SELECT * FROM t1;
+WHO WEEKS SUMS SUM AVGS SUMAVG AVGSUM AVGAVG AVERAGE
+Joe 3, 4, 5 69.00+83.00+26.00 178.00 17.25+16.60+13.00 46.85 59.33 15.62 16.18
+Beth 3, 4, 5 16.00+32.00+32.00 80.00 16.00+16.00+16.00 48.00 26.67 16.00 16.00
+Janet 3, 4, 5 55.00+17.00+57.00 129.00 18.33+17.00+14.25 49.58 43.00 16.53 16.12
+DROP TABLE t1;
+#
+# Expand expense in 3 one week tables
+#
+CREATE TABLE t2 (
+WHO CHAR(12),
+WEEK INT(2) JPATH='$.WEEK[0].NUMBER',
+WHAT CHAR(32) JPATH='$.WEEK[0].EXPENSE[*].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.WEEK[0].EXPENSE[*].AMOUNT')
+ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='expense.json';
+SELECT * FROM t2;
+WHO WEEK WHAT AMOUNT
+Joe 3 Beer 18.00
+Joe 3 Food 12.00
+Joe 3 Food 19.00
+Joe 3 Car 20.00
+Beth 3 Beer 16.00
+Janet 3 Car 19.00
+Janet 3 Food 18.00
+Janet 3 Beer 18.00
+CREATE TABLE t3 (
+WHO CHAR(12),
+WEEK INT(2) JPATH='$.WEEK[1].NUMBER',
+WHAT CHAR(32) JPATH='$.WEEK[1].EXPENSE[*].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.WEEK[1].EXPENSE[*].AMOUNT')
+ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='expense.json';
+SELECT * FROM t3;
+WHO WEEK WHAT AMOUNT
+Joe 4 Beer 19.00
+Joe 4 Beer 16.00
+Joe 4 Food 17.00
+Joe 4 Food 17.00
+Joe 4 Beer 14.00
+Beth 4 Food 17.00
+Beth 4 Beer 15.00
+Janet 4 Car 17.00
+CREATE TABLE t4 (
+WHO CHAR(12),
+WEEK INT(2) JPATH='$.WEEK[2].NUMBER',
+WHAT CHAR(32) JPATH='$.WEEK[2].EXPENSE[*].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.WEEK[2].EXPENSE[*].AMOUNT')
+ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='expense.json';
+SELECT * FROM t4;
+WHO WEEK WHAT AMOUNT
+Joe 5 Beer 14.00
+Joe 5 Food 12.00
+Beth 5 Food 12.00
+Beth 5 Beer 20.00
+Janet 5 Beer 14.00
+Janet 5 Car 12.00
+Janet 5 Beer 19.00
+Janet 5 Food 12.00
+#
+# The expanded table is made as a TBL table
+#
+CREATE TABLE t1 (
+WHO CHAR(12),
+WEEK INT(2),
+WHAT CHAR(32),
+AMOUNT DOUBLE(8,2))
+ENGINE=CONNECT TABLE_TYPE=TBL TABLE_LIST='t2,t3,t4';
+SELECT * FROM t1;
+WHO WEEK WHAT AMOUNT
+Joe 3 Beer 18.00
+Joe 3 Food 12.00
+Joe 3 Food 19.00
+Joe 3 Car 20.00
+Beth 3 Beer 16.00
+Janet 3 Car 19.00
+Janet 3 Food 18.00
+Janet 3 Beer 18.00
+Joe 4 Beer 19.00
+Joe 4 Beer 16.00
+Joe 4 Food 17.00
+Joe 4 Food 17.00
+Joe 4 Beer 14.00
+Beth 4 Food 17.00
+Beth 4 Beer 15.00
+Janet 4 Car 17.00
+Joe 5 Beer 14.00
+Joe 5 Food 12.00
+Beth 5 Food 12.00
+Beth 5 Beer 20.00
+Janet 5 Beer 14.00
+Janet 5 Car 12.00
+Janet 5 Beer 19.00
+Janet 5 Food 12.00
+DROP TABLE t1, t2, t3, t4;
+#
+# Three partial JSON tables
+#
+CREATE TABLE t2 (
+WHO CHAR(12),
+WEEK INT(2),
+WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT')
+ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='mulexp3.json';
+SELECT * FROM t2;
+WHO WEEK WHAT AMOUNT
+Joe 3 Beer 18.00
+Joe 3 Food 12.00
+Joe 3 Food 19.00
+Joe 3 Car 20.00
+Beth 3 Beer 16.00
+Janet 3 Car 19.00
+Janet 3 Food 18.00
+Janet 3 Beer 18.00
+CREATE TABLE t3 (
+WHO CHAR(12),
+WEEK INT(2),
+WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT')
+ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='mulexp4.json';
+SELECT * FROM t3;
+WHO WEEK WHAT AMOUNT
+Joe 4 Beer 19.00
+Joe 4 Beer 16.00
+Joe 4 Food 17.00
+Joe 4 Food 17.00
+Joe 4 Beer 14.00
+Beth 4 Food 17.00
+Beth 4 Beer 15.00
+Janet 4 Car 17.00
+CREATE TABLE t4 (
+WHO CHAR(12),
+WEEK INT(2),
+WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT')
+ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='mulexp5.json';
+SELECT * FROM t4;
+WHO WEEK WHAT AMOUNT
+Joe 5 Beer 14.00
+Joe 5 Food 12.00
+Beth 5 Food 12.00
+Beth 5 Beer 20.00
+Janet 5 Beer 14.00
+Janet 5 Car 12.00
+Janet 5 Beer 19.00
+Janet 5 Food 12.00
+#
+# The complete table can be a multiple JSON table
+#
+CREATE TABLE t1 (
+WHO CHAR(12),
+WEEK INT(2),
+WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT')
+ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='mulexp*.json' MULTIPLE=1;
+SELECT * FROM t1 ORDER BY WHO, WEEK, WHAT, AMOUNT;
+WHO WEEK WHAT AMOUNT
+Beth 3 Beer 16.00
+Beth 4 Beer 15.00
+Beth 4 Food 17.00
+Beth 5 Beer 20.00
+Beth 5 Food 12.00
+Janet 3 Beer 18.00
+Janet 3 Car 19.00
+Janet 3 Food 18.00
+Janet 4 Car 17.00
+Janet 5 Beer 14.00
+Janet 5 Beer 19.00
+Janet 5 Car 12.00
+Janet 5 Food 12.00
+Joe 3 Beer 18.00
+Joe 3 Car 20.00
+Joe 3 Food 12.00
+Joe 3 Food 19.00
+Joe 4 Beer 14.00
+Joe 4 Beer 16.00
+Joe 4 Beer 19.00
+Joe 4 Food 17.00
+Joe 4 Food 17.00
+Joe 5 Beer 14.00
+Joe 5 Food 12.00
+DROP TABLE t1;
+#
+# Or also a partition JSON table
+#
+CREATE TABLE t1 (
+WHO CHAR(12),
+WEEK INT(2),
+WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT')
+ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='mulexp%s.json';
+ALTER TABLE t1
+PARTITION BY LIST COLUMNS(WEEK) (
+PARTITION `3` VALUES IN(3),
+PARTITION `4` VALUES IN(4),
+PARTITION `5` VALUES IN(5));
+Warnings:
+Warning 1105 Data repartition in 3 is unchecked
+Warning 1105 Data repartition in 4 is unchecked
+Warning 1105 Data repartition in 5 is unchecked
+SHOW WARNINGS;
+Level Code Message
+Warning 1105 Data repartition in 3 is unchecked
+Warning 1105 Data repartition in 4 is unchecked
+Warning 1105 Data repartition in 5 is unchecked
+SELECT * FROM t1;
+WHO WEEK WHAT AMOUNT
+Joe 3 Beer 18.00
+Joe 3 Food 12.00
+Joe 3 Food 19.00
+Joe 3 Car 20.00
+Beth 3 Beer 16.00
+Janet 3 Car 19.00
+Janet 3 Food 18.00
+Janet 3 Beer 18.00
+Joe 4 Beer 19.00
+Joe 4 Beer 16.00
+Joe 4 Food 17.00
+Joe 4 Food 17.00
+Joe 4 Beer 14.00
+Beth 4 Food 17.00
+Beth 4 Beer 15.00
+Janet 4 Car 17.00
+Joe 5 Beer 14.00
+Joe 5 Food 12.00
+Beth 5 Food 12.00
+Beth 5 Beer 20.00
+Janet 5 Beer 14.00
+Janet 5 Car 12.00
+Janet 5 Beer 19.00
+Janet 5 Food 12.00
+SELECT * FROM t1 WHERE WEEK = 4;
+WHO WEEK WHAT AMOUNT
+Joe 4 Beer 19.00
+Joe 4 Beer 16.00
+Joe 4 Food 17.00
+Joe 4 Food 17.00
+Joe 4 Beer 14.00
+Beth 4 Food 17.00
+Beth 4 Beer 15.00
+Janet 4 Car 17.00
+DROP TABLE t1, t2, t3, t4;
diff --git a/storage/connect/mysql-test/connect/r/bson_java_2.result b/storage/connect/mysql-test/connect/r/bson_java_2.result
new file mode 100644
index 00000000000..1c21fc7c54f
--- /dev/null
+++ b/storage/connect/mysql-test/connect/r/bson_java_2.result
@@ -0,0 +1,385 @@
+set connect_enable_mongo=1;
+set connect_json_all_path=0;
+#
+# Test the MONGO table type
+#
+CREATE TABLE t1 (Document varchar(1024) JPATH='*')
+ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants CONNECTION='mongodb://localhost:27017' LRECL=4096
+OPTION_LIST='Driver=Java,Version=2' DATA_CHARSET=utf8;
+SELECT * from t1 limit 3;
+Document
+{"_id":{"$oid":"58ada47de5a51ddfcd5ed51c"},"address":{"building":"1007","coord":[-73.856077,40.848447],"street":"Morris Park Ave","zipcode":"10462"},"borough":"Bronx","cuisine":"Bakery","grades":[{"date":{"$date":"2014-03-03T00:00:00.000Z"},"grade":"A","score":2},{"date":{"$date":"2013-09-11T00:00:00.000Z"},"grade":"A","score":6},{"date":{"$date":"2013-01-24T00:00:00.000Z"},"grade":"A","score":10},{"date":{"$date":"2011-11-23T00:00:00.000Z"},"grade":"A","score":9},{"date":{"$date":"2011-03-10T00:00:00.000Z"},"grade":"B","score":14}],"name":"Morris Park Bake Shop","restaurant_id":"30075445"}
+{"_id":{"$oid":"58ada47de5a51ddfcd5ed51d"},"address":{"building":"469","coord":[-73.961704,40.662942],"street":"Flatbush Avenue","zipcode":"11225"},"borough":"Brooklyn","cuisine":"Hamburgers","grades":[{"date":{"$date":"2014-12-30T00:00:00.000Z"},"grade":"A","score":8},{"date":{"$date":"2014-07-01T00:00:00.000Z"},"grade":"B","score":23},{"date":{"$date":"2013-04-30T00:00:00.000Z"},"grade":"A","score":12},{"date":{"$date":"2012-05-08T00:00:00.000Z"},"grade":"A","score":12}],"name":"Wendy'S","restaurant_id":"30112340"}
+{"_id":{"$oid":"58ada47de5a51ddfcd5ed51e"},"address":{"building":"351","coord":[-73.98513559999999,40.7676919],"street":"West 57 Street","zipcode":"10019"},"borough":"Manhattan","cuisine":"Irish","grades":[{"date":{"$date":"2014-09-06T00:00:00.000Z"},"grade":"A","score":2},{"date":{"$date":"2013-07-22T00:00:00.000Z"},"grade":"A","score":11},{"date":{"$date":"2012-07-31T00:00:00.000Z"},"grade":"A","score":12},{"date":{"$date":"2011-12-29T00:00:00.000Z"},"grade":"A","score":12}],"name":"Dj Reynolds Pub And Restaurant","restaurant_id":"30191841"}
+DROP TABLE t1;
+#
+# Test catfunc
+#
+CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants CATFUNC=columns
+OPTION_LIST='Depth=1,Driver=Java,Version=2' DATA_CHARSET=utf8 CONNECTION='mongodb://localhost:27017' LRECL=4096;
+SELECT * from t1;
+Column_Name Data_Type Type_Name Column_Size Buffer_Length Decimal_Digits Nullable Jpath
+_id 1 CHAR 24 24 0 0 _id
+address_building 1 CHAR 10 10 0 0 address.building
+address_coord 1 CHAR 1024 1024 0 1 address.coord
+address_street 1 CHAR 38 38 0 0 address.street
+address_zipcode 1 CHAR 5 5 0 0 address.zipcode
+borough 1 CHAR 13 13 0 0
+cuisine 1 CHAR 64 64 0 0
+grades_date 1 CHAR 1024 1024 0 1 grades.0.date
+grades_grade 1 CHAR 14 14 0 1 grades.0.grade
+grades_score 7 INTEGER 2 2 0 1 grades.0.score
+name 1 CHAR 98 98 0 0
+restaurant_id 1 CHAR 8 8 0 0
+DROP TABLE t1;
+#
+# Explicit columns
+#
+CREATE TABLE t1 (
+_id VARCHAR(24) NOT NULL,
+name VARCHAR(255) NOT NULL,
+cuisine VARCHAR(255) NOT NULL,
+borough VARCHAR(255) NOT NULL,
+restaurant_id VARCHAR(255) NOT NULL)
+ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants
+CONNECTION='mongodb://localhost:27017' LRECL=1024 DATA_CHARSET=utf8
+OPTION_LIST='Driver=Java,Version=2';
+SELECT * FROM t1 LIMIT 10;
+_id name cuisine borough restaurant_id
+58ada47de5a51ddfcd5ed51c Morris Park Bake Shop Bakery Bronx 30075445
+58ada47de5a51ddfcd5ed51d Wendy'S Hamburgers Brooklyn 30112340
+58ada47de5a51ddfcd5ed51e Dj Reynolds Pub And Restaurant Irish Manhattan 30191841
+58ada47de5a51ddfcd5ed51f Riviera Caterer American Brooklyn 40356018
+58ada47de5a51ddfcd5ed520 Tov Kosher Kitchen Jewish/Kosher Queens 40356068
+58ada47de5a51ddfcd5ed521 Brunos On The Boulevard American Queens 40356151
+58ada47de5a51ddfcd5ed522 Kosher Island Jewish/Kosher Staten Island 40356442
+58ada47de5a51ddfcd5ed523 Wilken'S Fine Food Delicatessen Brooklyn 40356483
+58ada47de5a51ddfcd5ed524 Regina Caterers American Brooklyn 40356649
+58ada47de5a51ddfcd5ed525 Taste The Tropics Ice Cream Ice Cream, Gelato, Yogurt, Ices Brooklyn 40356731
+DROP TABLE t1;
+#
+# Test discovery
+#
+CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants
+OPTION_LIST='Depth=1,Driver=Java,Version=2' CONNECTION='mongodb://localhost:27017' LRECL=4096 DATA_CHARSET=utf8;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `_id` char(24) NOT NULL `JPATH`='_id',
+ `address_building` char(10) NOT NULL `JPATH`='address.building',
+ `address_coord` varchar(1024) DEFAULT NULL `JPATH`='address.coord',
+ `address_street` char(38) NOT NULL `JPATH`='address.street',
+ `address_zipcode` char(5) NOT NULL `JPATH`='address.zipcode',
+ `borough` char(13) NOT NULL,
+ `cuisine` char(64) NOT NULL,
+ `grades_date` varchar(1024) DEFAULT NULL `JPATH`='grades.0.date',
+ `grades_grade` char(14) DEFAULT NULL `JPATH`='grades.0.grade',
+ `grades_score` int(2) DEFAULT NULL `JPATH`='grades.0.score',
+ `name` char(98) NOT NULL,
+ `restaurant_id` char(8) NOT NULL
+) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='mongodb://localhost:27017' `TABLE_TYPE`='BSON' `TABNAME`='restaurants' `OPTION_LIST`='Depth=1,Driver=Java,Version=2' `DATA_CHARSET`='utf8' `LRECL`=4096
+SELECT * FROM t1 LIMIT 5;
+_id address_building address_coord address_street address_zipcode borough cuisine grades_date grades_grade grades_score name restaurant_id
+58ada47de5a51ddfcd5ed51c 1007 -73.856077, 40.848447 Morris Park Ave 10462 Bronx Bakery 2014-03-03T00:00:00.000Z A 2 Morris Park Bake Shop 30075445
+58ada47de5a51ddfcd5ed51d 469 -73.961704, 40.662942 Flatbush Avenue 11225 Brooklyn Hamburgers 2014-12-30T00:00:00.000Z A 8 Wendy'S 30112340
+58ada47de5a51ddfcd5ed51e 351 -73.98513559999999, 40.7676919 West 57 Street 10019 Manhattan Irish 2014-09-06T00:00:00.000Z A 2 Dj Reynolds Pub And Restaurant 30191841
+58ada47de5a51ddfcd5ed51f 2780 -73.98241999999999, 40.579505 Stillwell Avenue 11224 Brooklyn American 2014-06-10T00:00:00.000Z A 5 Riviera Caterer 40356018
+58ada47de5a51ddfcd5ed520 97-22 -73.8601152, 40.7311739 63 Road 11374 Queens Jewish/Kosher 2014-11-24T00:00:00.000Z Z 20 Tov Kosher Kitchen 40356068
+DROP TABLE t1;
+#
+# Dropping a column
+#
+CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants DATA_CHARSET=utf8
+COLIST='{"grades":0}' OPTION_LIST='Driver=Java,Version=2,level=0' CONNECTION='mongodb://localhost:27017' LRECL=4096;
+SELECT * FROM t1 LIMIT 10;
+_id address borough cuisine name restaurant_id
+58ada47de5a51ddfcd5ed51c 1007 (-73.856077, 40.848447) Morris Park Ave 10462 Bronx Bakery Morris Park Bake Shop 30075445
+58ada47de5a51ddfcd5ed51d 469 (-73.961704, 40.662942) Flatbush Avenue 11225 Brooklyn Hamburgers Wendy'S 30112340
+58ada47de5a51ddfcd5ed51e 351 (-73.98513559999999, 40.7676919) West 57 Street 10019 Manhattan Irish Dj Reynolds Pub And Restaurant 30191841
+58ada47de5a51ddfcd5ed51f 2780 (-73.98241999999999, 40.579505) Stillwell Avenue 11224 Brooklyn American Riviera Caterer 40356018
+58ada47de5a51ddfcd5ed520 97-22 (-73.8601152, 40.7311739) 63 Road 11374 Queens Jewish/Kosher Tov Kosher Kitchen 40356068
+58ada47de5a51ddfcd5ed521 8825 (-73.8803827, 40.7643124) Astoria Boulevard 11369 Queens American Brunos On The Boulevard 40356151
+58ada47de5a51ddfcd5ed522 2206 (-74.1377286, 40.6119572) Victory Boulevard 10314 Staten Island Jewish/Kosher Kosher Island 40356442
+58ada47de5a51ddfcd5ed523 7114 (-73.9068506, 40.6199034) Avenue U 11234 Brooklyn Delicatessen Wilken'S Fine Food 40356483
+58ada47de5a51ddfcd5ed524 6409 (-74.00528899999999, 40.628886) 11 Avenue 11219 Brooklyn American Regina Caterers 40356649
+58ada47de5a51ddfcd5ed525 1839 (-73.9482609, 40.6408271) Nostrand Avenue 11226 Brooklyn Ice Cream, Gelato, Yogurt, Ices Taste The Tropics Ice Cream 40356731
+DROP TABLE t1;
+#
+# Specifying Jpath
+#
+CREATE TABLE t1 (
+_id VARCHAR(24) NOT NULL,
+name VARCHAR(64) NOT NULL,
+cuisine CHAR(200) NOT NULL,
+borough CHAR(16) NOT NULL,
+street VARCHAR(65) JPATH='address.street',
+building CHAR(16) JPATH='address.building',
+zipcode CHAR(5) JPATH='address.zipcode',
+grade CHAR(1) JPATH='grades.0.grade',
+score INT(4) NOT NULL JPATH='grades.0.score',
+`date` DATE JPATH='grades.0.date',
+restaurant_id VARCHAR(255) NOT NULL)
+ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='restaurants' DATA_CHARSET=utf8
+OPTION_LIST='Driver=Java,Version=2' CONNECTION='mongodb://localhost:27017' LRECL=4096;
+SELECT * FROM t1 LIMIT 1;
+_id 58ada47de5a51ddfcd5ed51c
+name Morris Park Bake Shop
+cuisine Bakery
+borough Bronx
+street Morris Park Ave
+building 1007
+zipcode 10462
+grade A
+score 2
+date 1970-01-01
+restaurant_id 30075445
+SELECT name, street, score, date FROM t1 LIMIT 5;
+name street score date
+Morris Park Bake Shop Morris Park Ave 2 1970-01-01
+Wendy'S Flatbush Avenue 8 1970-01-01
+Dj Reynolds Pub And Restaurant West 57 Street 2 1970-01-01
+Riviera Caterer Stillwell Avenue 5 1970-01-01
+Tov Kosher Kitchen 63 Road 20 1970-01-01
+SELECT name, cuisine, borough FROM t1 WHERE grade = 'A' LIMIT 10;
+name cuisine borough
+Morris Park Bake Shop Bakery Bronx
+Wendy'S Hamburgers Brooklyn
+Dj Reynolds Pub And Restaurant Irish Manhattan
+Riviera Caterer American Brooklyn
+Kosher Island Jewish/Kosher Staten Island
+Wilken'S Fine Food Delicatessen Brooklyn
+Regina Caterers American Brooklyn
+Taste The Tropics Ice Cream Ice Cream, Gelato, Yogurt, Ices Brooklyn
+Wild Asia American Bronx
+C & C Catering Service American Brooklyn
+SELECT COUNT(*) FROM t1 WHERE grade = 'A';
+COUNT(*)
+20687
+SELECT * FROM t1 WHERE cuisine = 'English';
+_id name cuisine borough street building zipcode grade score date restaurant_id
+58ada47de5a51ddfcd5ed83d Tea And Sympathy English Manhattan Greenwich Avenue 108 10011 A 8 1970-01-01 40391531
+58ada47de5a51ddfcd5ed85c Tartine English Manhattan West 11 Street 253 10014 A 11 1970-01-01 40392496
+58ada47de5a51ddfcd5ee1f3 The Park Slope Chipshop English Brooklyn 5 Avenue 383 11215 B 17 1970-01-01 40816202
+58ada47de5a51ddfcd5ee7e4 Pound And Pence English Manhattan Liberty Street 55 10005 A 7 1970-01-01 41022701
+58ada47de5a51ddfcd5ee999 Chip Shop English Brooklyn Atlantic Avenue 129 11201 A 9 1970-01-01 41076583
+58ada47ee5a51ddfcd5efe3f The Breslin Bar & Dining Room English Manhattan West 29 Street 16 10001 A 13 1970-01-01 41443706
+58ada47ee5a51ddfcd5efe99 Highlands Restaurant English Manhattan West 10 Street 150 10014 A 12 1970-01-01 41448559
+58ada47ee5a51ddfcd5f0413 The Fat Radish English Manhattan Orchard Street 17 10002 A 12 1970-01-01 41513545
+58ada47ee5a51ddfcd5f0777 Jones Wood Foundry English Manhattan East 76 Street 401 10021 A 12 1970-01-01 41557377
+58ada47ee5a51ddfcd5f0ea2 Whitehall English Manhattan Greenwich Avenue 19 10014 Z 15 1970-01-01 41625263
+58ada47ee5a51ddfcd5f1004 The Churchill Tavern English Manhattan East 28 Street 45 10016 A 13 1970-01-01 41633327
+58ada47ee5a51ddfcd5f13d5 The Monro English Brooklyn 5 Avenue 481 11215 A 7 1970-01-01 41660253
+58ada47ee5a51ddfcd5f1454 The Cock & Bull English Manhattan West 45 Street 23 10036 A 7 1970-01-01 41664704
+58ada47ee5a51ddfcd5f176e Dear Bushwick English Brooklyn Wilson Avenue 41 11237 A 12 1970-01-01 41690534
+58ada47ee5a51ddfcd5f1e91 Snowdonia Pub English Queens 32 Street 34-55 11106 A 12 1970-01-01 50000290
+58ada47ee5a51ddfcd5f2ddc Oscar'S Place English Manhattan Hudson Street 466 10014 A 10 1970-01-01 50011097
+SELECT * FROM t1 WHERE score = building;
+_id name cuisine borough street building zipcode grade score date restaurant_id
+DROP TABLE t1;
+#
+# Specifying Filter
+#
+CREATE TABLE t1 (
+_id CHAR(24) NOT NULL,
+name CHAR(64) NOT NULL,
+borough CHAR(16) NOT NULL,
+restaurant_id CHAR(8) NOT NULL)
+ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants DATA_CHARSET=utf8
+FILTER='{"cuisine":"French","borough":{"$ne":"Manhattan"}}'
+OPTION_LIST='Driver=Java,Version=2' CONNECTION='mongodb://localhost:27017' LRECL=4096;
+SELECT name FROM t1 WHERE borough = 'Queens';
+name
+La Baraka Restaurant
+Air France Lounge
+Tournesol
+Winegasm
+Cafe Henri
+Bistro 33
+Domaine Wine Bar
+Cafe Triskell
+Cannelle Patisserie
+La Vie
+Dirty Pierres Bistro
+Fresca La Crepe
+Bliss 46 Bistro
+Bear
+Cuisine By Claudette
+Paris Baguette
+The Baroness Bar
+Francis Cafe
+Madame Sou Sou
+Crepe 'N' Tearia
+Aperitif Bayside Llc
+DROP TABLE t1;
+#
+# Testing pipeline
+#
+CREATE TABLE t1 (
+name VARCHAR(64) NOT NULL,
+borough CHAR(16) NOT NULL,
+date DATETIME NOT NULL,
+grade CHAR(1) NOT NULL,
+score INT(4) NOT NULL)
+ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='restaurants' DATA_CHARSET=utf8
+COLIST='{"pipeline":[{"$match":{"cuisine":"French"}},{"$unwind":"$grades"},{"$project":{"_id":0,"name":1,"borough":1,"date":"$grades.date","grade":"$grades.grade","score":"$grades.score"}}]}'
+OPTION_LIST='Driver=Java,Version=2,Pipeline=1' CONNECTION='mongodb://localhost:27017' LRECL=4096;
+SELECT * FROM t1 LIMIT 10;
+name borough date grade score
+Tout Va Bien Manhattan 1970-01-01 01:33:34 B 15
+Tout Va Bien Manhattan 1970-01-01 01:33:34 A 13
+Tout Va Bien Manhattan 1970-01-01 01:33:33 C 36
+Tout Va Bien Manhattan 1970-01-01 01:33:33 B 22
+Tout Va Bien Manhattan 1970-01-01 01:33:32 C 36
+Tout Va Bien Manhattan 1970-01-01 01:33:32 C 7
+La Grenouille Manhattan 1970-01-01 01:33:34 A 10
+La Grenouille Manhattan 1970-01-01 01:33:33 A 9
+La Grenouille Manhattan 1970-01-01 01:33:32 A 13
+Le Perigord Manhattan 1970-01-01 01:33:34 B 14
+SELECT name, grade, score, date FROM t1 WHERE borough = 'Bronx';
+name grade score date
+Bistro Sk A 10 1970-01-01 01:33:34
+Bistro Sk A 12 1970-01-01 01:33:34
+Bistro Sk B 18 1970-01-01 01:33:33
+DROP TABLE t1;
+#
+# try level 2 discovery
+#
+CREATE TABLE t1
+ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants
+FILTER='{"cuisine":"French","borough":{"$ne":"Manhattan"}}'
+COLIST='{"cuisine":0}' CONNECTION='mongodb://localhost:27017' LRECL=4096
+OPTION_LIST='Driver=Java,level=2,version=2';
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `_id` char(24) NOT NULL `JPATH`='_id',
+ `address_building` char(10) NOT NULL `JPATH`='address.building',
+ `address_coord` double(18,16) DEFAULT NULL `JPATH`='address.coord.0',
+ `address_street` char(38) NOT NULL `JPATH`='address.street',
+ `address_zipcode` char(5) NOT NULL `JPATH`='address.zipcode',
+ `borough` char(13) NOT NULL,
+ `grades_date` char(24) DEFAULT NULL `JPATH`='grades.0.date',
+ `grades_grade` char(14) DEFAULT NULL `JPATH`='grades.0.grade',
+ `grades_score` int(2) DEFAULT NULL `JPATH`='grades.0.score',
+ `name` char(98) NOT NULL,
+ `restaurant_id` char(8) NOT NULL
+) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='mongodb://localhost:27017' `TABLE_TYPE`='BSON' `TABNAME`='restaurants' `COLIST`='{"cuisine":0}' `FILTER`='{"cuisine":"French","borough":{"$ne":"Manhattan"}}' `OPTION_LIST`='Driver=Java,level=2,version=2' `LRECL`=4096
+SELECT name, borough, address_street, grades_score AS score FROM t1 WHERE grades_grade = 'B';
+name borough address_street score
+Le Gamin Brooklyn Vanderbilt Avenue 24
+Bistro 33 Queens Ditmars Boulevard 15
+Dirty Pierres Bistro Queens Station Square 22
+Santos Anne Brooklyn Union Avenue 26
+Le Paddock Brooklyn Prospect Avenue 17
+La Crepe Et La Vie Brooklyn Foster Avenue 24
+Francis Cafe Queens Ditmars Boulevard 19
+DROP TABLE t1;
+#
+# try CRUD operations
+#
+false
+CREATE TABLE t1 (_id INT(4) NOT NULL, msg CHAR(64))
+ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='testcoll'
+OPTION_LIST='Driver=Java,Version=2' CONNECTION='mongodb://localhost:27017' LRECL=4096;
+DELETE FROM t1;
+INSERT INTO t1 VALUES(0,NULL),(1,'One'),(2,'Two'),(3,'Three');
+SELECT * FROM t1;
+_id msg
+0 NULL
+1 One
+2 Two
+3 Three
+UPDATE t1 SET msg = 'Deux' WHERE _id = 2;
+DELETE FROM t1 WHERE msg IS NULL;
+SELECT * FROM t1;
+_id msg
+1 One
+2 Deux
+3 Three
+DELETE FROM t1;
+DROP TABLE t1;
+true
+#
+# List states whose population is equal to or greater than 10 million
+#
+false
+CREATE TABLE t1 (
+_id char(5) NOT NULL,
+city char(16) NOT NULL,
+loc_0 double(12,6) NOT NULL `JPATH`='loc.0',
+loc_1 char(12) NOT NULL `JPATH`='loc.1',
+pop int(11) NOT NULL,
+state char(2) NOT NULL)
+ENGINE=CONNECT CONNECTION='mongodb://localhost:27017' TABLE_TYPE=BSON TABNAME='cities'
+OPTION_LIST='Driver=Java,Version=2' CONNECTION='mongodb://localhost:27017' LRECL=4096 DATA_CHARSET='utf8';
+# Using SQL for grouping
+SELECT state, sum(pop) AS totalPop FROM t1 GROUP BY state HAVING totalPop >= 10000000 ORDER BY totalPop DESC;
+state totalPop
+CA 29754890
+NY 17990402
+TX 16984601
+FL 12686644
+PA 11881643
+IL 11427576
+OH 10846517
+DROP TABLE t1;
+# Using a pipeline for grouping
+CREATE TABLE t1 (_id CHAR(2) NOT NULL, totalPop INT(11) NOT NULL)
+ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='cities' DATA_CHARSET=utf8
+COLIST='{"pipeline":[{"$group":{"_id":"$state","totalPop":{"$sum":"$pop"}}},{"$match":{"totalPop":{"$gte":10000000}}},{"$sort":{"totalPop":-1}}]}'
+OPTION_LIST='Driver=Java,Version=2,Pipeline=1' CONNECTION='mongodb://localhost:27017' LRECL=4096;
+SELECT * FROM t1;
+_id totalPop
+CA 29754890
+NY 17990402
+TX 16984601
+FL 12686644
+PA 11881643
+IL 11427576
+OH 10846517
+DROP TABLE t1;
+true
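The two grouping tests above return the same per-state totals by different routes: the first table exposes the raw cities collection and lets MariaDB evaluate the GROUP BY ... HAVING itself, while the second pushes the whole aggregation down to MongoDB through the pipeline embedded in COLIST. A condensed sketch of the two definitions, using hypothetical names cities_raw and cities_agg, with the options taken from the statements above:

-- Grouping evaluated by MariaDB over the raw collection
CREATE TABLE cities_raw (
_id CHAR(5) NOT NULL, city CHAR(16) NOT NULL,
loc_0 DOUBLE(12,6) NOT NULL JPATH='loc.0', loc_1 CHAR(12) NOT NULL JPATH='loc.1',
pop INT(11) NOT NULL, state CHAR(2) NOT NULL)
ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='cities' DATA_CHARSET=utf8
OPTION_LIST='Driver=Java,Version=2' CONNECTION='mongodb://localhost:27017' LRECL=4096;
SELECT state, SUM(pop) AS totalPop FROM cities_raw
GROUP BY state HAVING totalPop >= 10000000 ORDER BY totalPop DESC;

-- Grouping delegated to MongoDB; the table already holds one row per qualifying state
CREATE TABLE cities_agg (_id CHAR(2) NOT NULL, totalPop INT(11) NOT NULL)
ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='cities' DATA_CHARSET=utf8
COLIST='{"pipeline":[{"$group":{"_id":"$state","totalPop":{"$sum":"$pop"}}},{"$match":{"totalPop":{"$gte":10000000}}},{"$sort":{"totalPop":-1}}]}'
OPTION_LIST='Driver=Java,Version=2,Pipeline=1' CONNECTION='mongodb://localhost:27017' LRECL=4096;
SELECT * FROM cities_agg;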
+#
+# Test making array
+#
+CREATE TABLE t1 (
+_id int(4) NOT NULL,
+item CHAR(8) NOT NULL,
+prices_0 INT(6) JPATH='prices.0',
+prices_1 INT(6) JPATH='prices.1',
+prices_2 INT(6) JPATH='prices.2',
+prices_3 INT(6) JPATH='prices.3',
+prices_4 INT(6) JPATH='prices.4')
+ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='testcoll' DATA_CHARSET=utf8
+OPTION_LIST='Driver=Java,Version=2' CONNECTION='mongodb://localhost:27017' LRECL=4096;
+INSERT INTO t1 VALUES
+(1,'journal',87,45,63,12,78),
+(2,'notebook',123,456,789,NULL,NULL),
+(3,'paper',5,7,3,8,NULL),
+(4,'planner',25,71,NULL,44,27),
+(5,'postcard',5,7,3,8,NULL);
+SELECT * FROM t1;
+_id item prices_0 prices_1 prices_2 prices_3 prices_4
+1 journal 87 45 63 12 78
+2 notebook 123 456 789 NULL NULL
+3 paper 5 7 3 8 NULL
+4 planner 25 71 NULL 44 27
+5 postcard 5 7 3 8 NULL
+DROP TABLE t1;
+#
+# Test array aggregation
+#
+CREATE TABLE t1
+ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='testcoll'
+COLIST='{"pipeline":[{"$project":{"_id":0,"item":1,"total":{"$sum":"$prices"},"average":{"$avg":"$prices"}}}]}'
+OPTION_LIST='Driver=Java,Version=2,Pipeline=YES' CONNECTION='mongodb://localhost:27017' LRECL=4096;
+SELECT * FROM t1;
+item total average
+journal 285 57.00
+notebook 1368 456.00
+paper 23 5.75
+planner 167 41.75
+postcard 23 5.75
+DROP TABLE t1;
+true
+set connect_enable_mongo=0;
diff --git a/storage/connect/mysql-test/connect/r/bson_java_3.result b/storage/connect/mysql-test/connect/r/bson_java_3.result
new file mode 100644
index 00000000000..d198ee3faa4
--- /dev/null
+++ b/storage/connect/mysql-test/connect/r/bson_java_3.result
@@ -0,0 +1,385 @@
+set connect_enable_mongo=1;
+set connect_json_all_path=0;
+#
+# Test the MONGO table type
+#
+CREATE TABLE t1 (Document varchar(1024) JPATH='*')
+ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants CONNECTION='mongodb://localhost:27017' LRECL=4096
+OPTION_LIST='Driver=Java,Version=3' DATA_CHARSET=utf8;
+SELECT * from t1 limit 3;
+Document
+{"_id":{"$oid":"58ada47de5a51ddfcd5ed51c"},"address":{"building":"1007","coord":[-73.856077,40.848447],"street":"Morris Park Ave","zipcode":"10462"},"borough":"Bronx","cuisine":"Bakery","grades":[{"date":{"$date":1393804800000},"grade":"A","score":2},{"date":{"$date":1378857600000},"grade":"A","score":6},{"date":{"$date":1358985600000},"grade":"A","score":10},{"date":{"$date":1322006400000},"grade":"A","score":9},{"date":{"$date":1299715200000},"grade":"B","score":14}],"name":"Morris Park Bake Shop","restaurant_id":"30075445"}
+{"_id":{"$oid":"58ada47de5a51ddfcd5ed51d"},"address":{"building":"469","coord":[-73.961704,40.662942],"street":"Flatbush Avenue","zipcode":"11225"},"borough":"Brooklyn","cuisine":"Hamburgers","grades":[{"date":{"$date":1419897600000},"grade":"A","score":8},{"date":{"$date":1404172800000},"grade":"B","score":23},{"date":{"$date":1367280000000},"grade":"A","score":12},{"date":{"$date":1336435200000},"grade":"A","score":12}],"name":"Wendy'S","restaurant_id":"30112340"}
+{"_id":{"$oid":"58ada47de5a51ddfcd5ed51e"},"address":{"building":"351","coord":[-73.98513559999999,40.7676919],"street":"West 57 Street","zipcode":"10019"},"borough":"Manhattan","cuisine":"Irish","grades":[{"date":{"$date":1409961600000},"grade":"A","score":2},{"date":{"$date":1374451200000},"grade":"A","score":11},{"date":{"$date":1343692800000},"grade":"A","score":12},{"date":{"$date":1325116800000},"grade":"A","score":12}],"name":"Dj Reynolds Pub And Restaurant","restaurant_id":"30191841"}
+DROP TABLE t1;
+#
+# Test catfunc
+#
+CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants CATFUNC=columns
+OPTION_LIST='Depth=1,Driver=Java,Version=3' DATA_CHARSET=utf8 CONNECTION='mongodb://localhost:27017' LRECL=4096;
+SELECT * from t1;
+Column_Name Data_Type Type_Name Column_Size Buffer_Length Decimal_Digits Nullable Jpath
+_id 1 CHAR 24 24 0 0 _id
+address_building 1 CHAR 10 10 0 0 address.building
+address_coord 1 CHAR 1024 1024 0 1 address.coord
+address_street 1 CHAR 38 38 0 0 address.street
+address_zipcode 1 CHAR 5 5 0 0 address.zipcode
+borough 1 CHAR 13 13 0 0
+cuisine 1 CHAR 64 64 0 0
+grades_date 1 CHAR 1024 1024 0 1 grades.0.date
+grades_grade 1 CHAR 14 14 0 1 grades.0.grade
+grades_score 7 INTEGER 2 2 0 1 grades.0.score
+name 1 CHAR 98 98 0 0
+restaurant_id 1 CHAR 8 8 0 0
+DROP TABLE t1;
+#
+# Explicit columns
+#
+CREATE TABLE t1 (
+_id VARCHAR(24) NOT NULL,
+name VARCHAR(255) NOT NULL,
+cuisine VARCHAR(255) NOT NULL,
+borough VARCHAR(255) NOT NULL,
+restaurant_id VARCHAR(255) NOT NULL)
+ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants
+CONNECTION='mongodb://localhost:27017' LRECL=1024 DATA_CHARSET=utf8
+OPTION_LIST='Driver=Java,Version=3';
+SELECT * FROM t1 LIMIT 10;
+_id name cuisine borough restaurant_id
+58ada47de5a51ddfcd5ed51c Morris Park Bake Shop Bakery Bronx 30075445
+58ada47de5a51ddfcd5ed51d Wendy'S Hamburgers Brooklyn 30112340
+58ada47de5a51ddfcd5ed51e Dj Reynolds Pub And Restaurant Irish Manhattan 30191841
+58ada47de5a51ddfcd5ed51f Riviera Caterer American Brooklyn 40356018
+58ada47de5a51ddfcd5ed520 Tov Kosher Kitchen Jewish/Kosher Queens 40356068
+58ada47de5a51ddfcd5ed521 Brunos On The Boulevard American Queens 40356151
+58ada47de5a51ddfcd5ed522 Kosher Island Jewish/Kosher Staten Island 40356442
+58ada47de5a51ddfcd5ed523 Wilken'S Fine Food Delicatessen Brooklyn 40356483
+58ada47de5a51ddfcd5ed524 Regina Caterers American Brooklyn 40356649
+58ada47de5a51ddfcd5ed525 Taste The Tropics Ice Cream Ice Cream, Gelato, Yogurt, Ices Brooklyn 40356731
+DROP TABLE t1;
+#
+# Test discovery
+#
+CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants
+OPTION_LIST='Depth=1,Driver=Java,Version=3' CONNECTION='mongodb://localhost:27017' LRECL=4096 DATA_CHARSET=utf8;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `_id` char(24) NOT NULL `JPATH`='_id',
+ `address_building` char(10) NOT NULL `JPATH`='address.building',
+ `address_coord` varchar(1024) DEFAULT NULL `JPATH`='address.coord',
+ `address_street` char(38) NOT NULL `JPATH`='address.street',
+ `address_zipcode` char(5) NOT NULL `JPATH`='address.zipcode',
+ `borough` char(13) NOT NULL,
+ `cuisine` char(64) NOT NULL,
+ `grades_date` varchar(1024) DEFAULT NULL `JPATH`='grades.0.date',
+ `grades_grade` char(14) DEFAULT NULL `JPATH`='grades.0.grade',
+ `grades_score` int(2) DEFAULT NULL `JPATH`='grades.0.score',
+ `name` char(98) NOT NULL,
+ `restaurant_id` char(8) NOT NULL
+) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='mongodb://localhost:27017' `TABLE_TYPE`='BSON' `TABNAME`='restaurants' `OPTION_LIST`='Depth=1,Driver=Java,Version=3' `DATA_CHARSET`='utf8' `LRECL`=4096
+SELECT * FROM t1 LIMIT 5;
+_id address_building address_coord address_street address_zipcode borough cuisine grades_date grades_grade grades_score name restaurant_id
+58ada47de5a51ddfcd5ed51c 1007 -73.856077, 40.848447 Morris Park Ave 10462 Bronx Bakery 1393804800 A 2 Morris Park Bake Shop 30075445
+58ada47de5a51ddfcd5ed51d 469 -73.961704, 40.662942 Flatbush Avenue 11225 Brooklyn Hamburgers 1419897600 A 8 Wendy'S 30112340
+58ada47de5a51ddfcd5ed51e 351 -73.98513559999999, 40.7676919 West 57 Street 10019 Manhattan Irish 1409961600 A 2 Dj Reynolds Pub And Restaurant 30191841
+58ada47de5a51ddfcd5ed51f 2780 -73.98241999999999, 40.579505 Stillwell Avenue 11224 Brooklyn American 1402358400 A 5 Riviera Caterer 40356018
+58ada47de5a51ddfcd5ed520 97-22 -73.8601152, 40.7311739 63 Road 11374 Queens Jewish/Kosher 1416787200 Z 20 Tov Kosher Kitchen 40356068
+DROP TABLE t1;
+#
+# Dropping a column
+#
+CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants DATA_CHARSET=utf8
+COLIST='{"grades":0}' OPTION_LIST='Driver=Java,Version=3,level=0' CONNECTION='mongodb://localhost:27017' LRECL=4096;
+SELECT * FROM t1 LIMIT 10;
+_id address borough cuisine name restaurant_id
+58ada47de5a51ddfcd5ed51c 1007 (-73.856077, 40.848447) Morris Park Ave 10462 Bronx Bakery Morris Park Bake Shop 30075445
+58ada47de5a51ddfcd5ed51d 469 (-73.961704, 40.662942) Flatbush Avenue 11225 Brooklyn Hamburgers Wendy'S 30112340
+58ada47de5a51ddfcd5ed51e 351 (-73.98513559999999, 40.7676919) West 57 Street 10019 Manhattan Irish Dj Reynolds Pub And Restaurant 30191841
+58ada47de5a51ddfcd5ed51f 2780 (-73.98241999999999, 40.579505) Stillwell Avenue 11224 Brooklyn American Riviera Caterer 40356018
+58ada47de5a51ddfcd5ed520 97-22 (-73.8601152, 40.7311739) 63 Road 11374 Queens Jewish/Kosher Tov Kosher Kitchen 40356068
+58ada47de5a51ddfcd5ed521 8825 (-73.8803827, 40.7643124) Astoria Boulevard 11369 Queens American Brunos On The Boulevard 40356151
+58ada47de5a51ddfcd5ed522 2206 (-74.1377286, 40.6119572) Victory Boulevard 10314 Staten Island Jewish/Kosher Kosher Island 40356442
+58ada47de5a51ddfcd5ed523 7114 (-73.9068506, 40.6199034) Avenue U 11234 Brooklyn Delicatessen Wilken'S Fine Food 40356483
+58ada47de5a51ddfcd5ed524 6409 (-74.00528899999999, 40.628886) 11 Avenue 11219 Brooklyn American Regina Caterers 40356649
+58ada47de5a51ddfcd5ed525 1839 (-73.9482609, 40.6408271) Nostrand Avenue 11226 Brooklyn Ice Cream, Gelato, Yogurt, Ices Taste The Tropics Ice Cream 40356731
+DROP TABLE t1;
+#
+# Specifying Jpath
+#
+CREATE TABLE t1 (
+_id VARCHAR(24) NOT NULL,
+name VARCHAR(64) NOT NULL,
+cuisine CHAR(200) NOT NULL,
+borough CHAR(16) NOT NULL,
+street VARCHAR(65) JPATH='address.street',
+building CHAR(16) JPATH='address.building',
+zipcode CHAR(5) JPATH='address.zipcode',
+grade CHAR(1) JPATH='grades.0.grade',
+score INT(4) NOT NULL JPATH='grades.0.score',
+`date` DATE JPATH='grades.0.date',
+restaurant_id VARCHAR(255) NOT NULL)
+ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='restaurants' DATA_CHARSET=utf8
+OPTION_LIST='Driver=Java,Version=3' CONNECTION='mongodb://localhost:27017' LRECL=4096;
+SELECT * FROM t1 LIMIT 1;
+_id 58ada47de5a51ddfcd5ed51c
+name Morris Park Bake Shop
+cuisine Bakery
+borough Bronx
+street Morris Park Ave
+building 1007
+zipcode 10462
+grade A
+score 2
+date 2014-03-03
+restaurant_id 30075445
+SELECT name, street, score, date FROM t1 LIMIT 5;
+name street score date
+Morris Park Bake Shop Morris Park Ave 2 2014-03-03
+Wendy'S Flatbush Avenue 8 2014-12-30
+Dj Reynolds Pub And Restaurant West 57 Street 2 2014-09-06
+Riviera Caterer Stillwell Avenue 5 2014-06-10
+Tov Kosher Kitchen 63 Road 20 2014-11-24
+SELECT name, cuisine, borough FROM t1 WHERE grade = 'A' LIMIT 10;
+name cuisine borough
+Morris Park Bake Shop Bakery Bronx
+Wendy'S Hamburgers Brooklyn
+Dj Reynolds Pub And Restaurant Irish Manhattan
+Riviera Caterer American Brooklyn
+Kosher Island Jewish/Kosher Staten Island
+Wilken'S Fine Food Delicatessen Brooklyn
+Regina Caterers American Brooklyn
+Taste The Tropics Ice Cream Ice Cream, Gelato, Yogurt, Ices Brooklyn
+Wild Asia American Bronx
+C & C Catering Service American Brooklyn
+SELECT COUNT(*) FROM t1 WHERE grade = 'A';
+COUNT(*)
+20687
+SELECT * FROM t1 WHERE cuisine = 'English';
+_id name cuisine borough street building zipcode grade score date restaurant_id
+58ada47de5a51ddfcd5ed83d Tea And Sympathy English Manhattan Greenwich Avenue 108 10011 A 8 2014-10-23 40391531
+58ada47de5a51ddfcd5ed85c Tartine English Manhattan West 11 Street 253 10014 A 11 2014-08-14 40392496
+58ada47de5a51ddfcd5ee1f3 The Park Slope Chipshop English Brooklyn 5 Avenue 383 11215 B 17 2014-09-29 40816202
+58ada47de5a51ddfcd5ee7e4 Pound And Pence English Manhattan Liberty Street 55 10005 A 7 2014-02-11 41022701
+58ada47de5a51ddfcd5ee999 Chip Shop English Brooklyn Atlantic Avenue 129 11201 A 9 2014-10-08 41076583
+58ada47ee5a51ddfcd5efe3f The Breslin Bar & Dining Room English Manhattan West 29 Street 16 10001 A 13 2014-06-09 41443706
+58ada47ee5a51ddfcd5efe99 Highlands Restaurant English Manhattan West 10 Street 150 10014 A 12 2014-10-22 41448559
+58ada47ee5a51ddfcd5f0413 The Fat Radish English Manhattan Orchard Street 17 10002 A 12 2014-07-26 41513545
+58ada47ee5a51ddfcd5f0777 Jones Wood Foundry English Manhattan East 76 Street 401 10021 A 12 2014-12-03 41557377
+58ada47ee5a51ddfcd5f0ea2 Whitehall English Manhattan Greenwich Avenue 19 10014 Z 15 2015-01-16 41625263
+58ada47ee5a51ddfcd5f1004 The Churchill Tavern English Manhattan East 28 Street 45 10016 A 13 2014-08-27 41633327
+58ada47ee5a51ddfcd5f13d5 The Monro English Brooklyn 5 Avenue 481 11215 A 7 2014-06-03 41660253
+58ada47ee5a51ddfcd5f1454 The Cock & Bull English Manhattan West 45 Street 23 10036 A 7 2014-08-07 41664704
+58ada47ee5a51ddfcd5f176e Dear Bushwick English Brooklyn Wilson Avenue 41 11237 A 12 2014-12-27 41690534
+58ada47ee5a51ddfcd5f1e91 Snowdonia Pub English Queens 32 Street 34-55 11106 A 12 2014-10-28 50000290
+58ada47ee5a51ddfcd5f2ddc Oscar'S Place English Manhattan Hudson Street 466 10014 A 10 2014-08-18 50011097
+SELECT * FROM t1 WHERE score = building;
+_id name cuisine borough street building zipcode grade score date restaurant_id
+DROP TABLE t1;
+#
+# Specifying Filter
+#
+CREATE TABLE t1 (
+_id CHAR(24) NOT NULL,
+name CHAR(64) NOT NULL,
+borough CHAR(16) NOT NULL,
+restaurant_id CHAR(8) NOT NULL)
+ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants DATA_CHARSET=utf8
+FILTER='{"cuisine":"French","borough":{"$ne":"Manhattan"}}'
+OPTION_LIST='Driver=Java,Version=3' CONNECTION='mongodb://localhost:27017' LRECL=4096;
+SELECT name FROM t1 WHERE borough = 'Queens';
+name
+La Baraka Restaurant
+Air France Lounge
+Tournesol
+Winegasm
+Cafe Henri
+Bistro 33
+Domaine Wine Bar
+Cafe Triskell
+Cannelle Patisserie
+La Vie
+Dirty Pierres Bistro
+Fresca La Crepe
+Bliss 46 Bistro
+Bear
+Cuisine By Claudette
+Paris Baguette
+The Baroness Bar
+Francis Cafe
+Madame Sou Sou
+Crepe 'N' Tearia
+Aperitif Bayside Llc
+DROP TABLE t1;
+#
+# Testing pipeline
+#
+CREATE TABLE t1 (
+name VARCHAR(64) NOT NULL,
+borough CHAR(16) NOT NULL,
+date DATETIME NOT NULL,
+grade CHAR(1) NOT NULL,
+score INT(4) NOT NULL)
+ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='restaurants' DATA_CHARSET=utf8
+COLIST='{"pipeline":[{"$match":{"cuisine":"French"}},{"$unwind":"$grades"},{"$project":{"_id":0,"name":1,"borough":1,"date":"$grades.date","grade":"$grades.grade","score":"$grades.score"}}]}'
+OPTION_LIST='Driver=Java,Version=3,Pipeline=1' CONNECTION='mongodb://localhost:27017' LRECL=4096;
+SELECT * FROM t1 LIMIT 10;
+name borough date grade score
+Tout Va Bien Manhattan 2014-11-10 01:00:00 B 15
+Tout Va Bien Manhattan 2014-04-03 02:00:00 A 13
+Tout Va Bien Manhattan 2013-07-17 02:00:00 C 36
+Tout Va Bien Manhattan 2013-02-06 01:00:00 B 22
+Tout Va Bien Manhattan 2012-07-16 02:00:00 C 36
+Tout Va Bien Manhattan 2012-03-08 01:00:00 C 7
+La Grenouille Manhattan 2014-04-09 02:00:00 A 10
+La Grenouille Manhattan 2013-03-05 01:00:00 A 9
+La Grenouille Manhattan 2012-02-02 01:00:00 A 13
+Le Perigord Manhattan 2014-07-14 02:00:00 B 14
+SELECT name, grade, score, date FROM t1 WHERE borough = 'Bronx';
+name grade score date
+Bistro Sk A 10 2014-11-21 01:00:00
+Bistro Sk A 12 2014-02-19 01:00:00
+Bistro Sk B 18 2013-06-12 02:00:00
+DROP TABLE t1;
+#
+# try level 2 discovery
+#
+CREATE TABLE t1
+ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants
+FILTER='{"cuisine":"French","borough":{"$ne":"Manhattan"}}'
+COLIST='{"cuisine":0}' CONNECTION='mongodb://localhost:27017' LRECL=4096
+OPTION_LIST='Driver=Java,level=2,version=3';
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `_id` char(24) NOT NULL `JPATH`='_id',
+ `address_building` char(10) NOT NULL `JPATH`='address.building',
+ `address_coord` double(18,16) DEFAULT NULL `JPATH`='address.coord.0',
+ `address_street` char(38) NOT NULL `JPATH`='address.street',
+ `address_zipcode` char(5) NOT NULL `JPATH`='address.zipcode',
+ `borough` char(13) NOT NULL,
+ `grades_date` bigint(13) DEFAULT NULL `JPATH`='grades.0.date',
+ `grades_grade` char(14) DEFAULT NULL `JPATH`='grades.0.grade',
+ `grades_score` int(2) DEFAULT NULL `JPATH`='grades.0.score',
+ `name` char(98) NOT NULL,
+ `restaurant_id` char(8) NOT NULL
+) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='mongodb://localhost:27017' `TABLE_TYPE`='BSON' `TABNAME`='restaurants' `COLIST`='{"cuisine":0}' `FILTER`='{"cuisine":"French","borough":{"$ne":"Manhattan"}}' `OPTION_LIST`='Driver=Java,level=2,version=3' `LRECL`=4096
+SELECT name, borough, address_street, grades_score AS score FROM t1 WHERE grades_grade = 'B';
+name borough address_street score
+Le Gamin Brooklyn Vanderbilt Avenue 24
+Bistro 33 Queens Ditmars Boulevard 15
+Dirty Pierres Bistro Queens Station Square 22
+Santos Anne Brooklyn Union Avenue 26
+Le Paddock Brooklyn Prospect Avenue 17
+La Crepe Et La Vie Brooklyn Foster Avenue 24
+Francis Cafe Queens Ditmars Boulevard 19
+DROP TABLE t1;
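Comparing this SHOW CREATE TABLE with the Depth=1 discovery earlier in the file makes the effect of the Depth/level option visible: at depth 1 the address.coord array is left as a single varchar(1024) column, while level=2 descends one step further and maps the first array element to a double. A side-by-side sketch of the two discovery statements and the column they disagree on, restated from the statements and output above:

-- Depth=1: discovery stops at the array itself
CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants
OPTION_LIST='Depth=1,Driver=Java,Version=3' CONNECTION='mongodb://localhost:27017' LRECL=4096 DATA_CHARSET=utf8;
-- discovered:  `address_coord` varchar(1024) DEFAULT NULL `JPATH`='address.coord'

-- level=2: discovery walks into the array
CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants
FILTER='{"cuisine":"French","borough":{"$ne":"Manhattan"}}'
COLIST='{"cuisine":0}' CONNECTION='mongodb://localhost:27017' LRECL=4096
OPTION_LIST='Driver=Java,level=2,version=3';
-- discovered:  `address_coord` double(18,16) DEFAULT NULL `JPATH`='address.coord.0'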
+#
+# try CRUD operations
+#
+false
+CREATE TABLE t1 (_id INT(4) NOT NULL, msg CHAR(64))
+ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='testcoll'
+OPTION_LIST='Driver=Java,Version=3' CONNECTION='mongodb://localhost:27017' LRECL=4096;
+DELETE FROM t1;
+INSERT INTO t1 VALUES(0,NULL),(1,'One'),(2,'Two'),(3,'Three');
+SELECT * FROM t1;
+_id msg
+0 NULL
+1 One
+2 Two
+3 Three
+UPDATE t1 SET msg = 'Deux' WHERE _id = 2;
+DELETE FROM t1 WHERE msg IS NULL;
+SELECT * FROM t1;
+_id msg
+1 One
+2 Deux
+3 Three
+DELETE FROM t1;
+DROP TABLE t1;
+true
+#
+# List states whose population is equal to or greater than 10 million
+#
+false
+CREATE TABLE t1 (
+_id char(5) NOT NULL,
+city char(16) NOT NULL,
+loc_0 double(12,6) NOT NULL `JPATH`='loc.0',
+loc_1 char(12) NOT NULL `JPATH`='loc.1',
+pop int(11) NOT NULL,
+state char(2) NOT NULL)
+ENGINE=CONNECT CONNECTION='mongodb://localhost:27017' TABLE_TYPE=BSON TABNAME='cities'
+OPTION_LIST='Driver=Java,Version=3' CONNECTION='mongodb://localhost:27017' LRECL=4096 DATA_CHARSET='utf8';
+# Using SQL for grouping
+SELECT state, sum(pop) AS totalPop FROM t1 GROUP BY state HAVING totalPop >= 10000000 ORDER BY totalPop DESC;
+state totalPop
+CA 29754890
+NY 17990402
+TX 16984601
+FL 12686644
+PA 11881643
+IL 11427576
+OH 10846517
+DROP TABLE t1;
+# Using a pipeline for grouping
+CREATE TABLE t1 (_id CHAR(2) NOT NULL, totalPop INT(11) NOT NULL)
+ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='cities' DATA_CHARSET=utf8
+COLIST='{"pipeline":[{"$group":{"_id":"$state","totalPop":{"$sum":"$pop"}}},{"$match":{"totalPop":{"$gte":10000000}}},{"$sort":{"totalPop":-1}}]}'
+OPTION_LIST='Driver=Java,Version=3,Pipeline=1' CONNECTION='mongodb://localhost:27017' LRECL=4096;
+SELECT * FROM t1;
+_id totalPop
+CA 29754890
+NY 17990402
+TX 16984601
+FL 12686644
+PA 11881643
+IL 11427576
+OH 10846517
+DROP TABLE t1;
+true
+#
+# Test making array
+#
+CREATE TABLE t1 (
+_id int(4) NOT NULL,
+item CHAR(8) NOT NULL,
+prices_0 INT(6) JPATH='prices.0',
+prices_1 INT(6) JPATH='prices.1',
+prices_2 INT(6) JPATH='prices.2',
+prices_3 INT(6) JPATH='prices.3',
+prices_4 INT(6) JPATH='prices.4')
+ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='testcoll' DATA_CHARSET=utf8
+OPTION_LIST='Driver=Java,Version=3' CONNECTION='mongodb://localhost:27017' LRECL=4096;
+INSERT INTO t1 VALUES
+(1,'journal',87,45,63,12,78),
+(2,'notebook',123,456,789,NULL,NULL),
+(3,'paper',5,7,3,8,NULL),
+(4,'planner',25,71,NULL,44,27),
+(5,'postcard',5,7,3,8,NULL);
+SELECT * FROM t1;
+_id item prices_0 prices_1 prices_2 prices_3 prices_4
+1 journal 87 45 63 12 78
+2 notebook 123 456 789 NULL NULL
+3 paper 5 7 3 8 NULL
+4 planner 25 71 NULL 44 27
+5 postcard 5 7 3 8 NULL
+DROP TABLE t1;
+#
+# Test array aggregation
+#
+CREATE TABLE t1
+ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='testcoll'
+COLIST='{"pipeline":[{"$project":{"_id":0,"item":1,"total":{"$sum":"$prices"},"average":{"$avg":"$prices"}}}]}'
+OPTION_LIST='Driver=Java,Version=3,Pipeline=YES' CONNECTION='mongodb://localhost:27017' LRECL=4096;
+SELECT * FROM t1;
+item total average
+journal 285 57.00
+notebook 1368 456.00
+paper 23 5.75
+planner 167 41.75
+postcard 23 5.75
+DROP TABLE t1;
+true
+set connect_enable_mongo=0;
diff --git a/storage/connect/mysql-test/connect/r/bson_mongo_c.result b/storage/connect/mysql-test/connect/r/bson_mongo_c.result
new file mode 100644
index 00000000000..83bf7cd1974
--- /dev/null
+++ b/storage/connect/mysql-test/connect/r/bson_mongo_c.result
@@ -0,0 +1,385 @@
+set connect_enable_mongo=1;
+set connect_json_all_path=0;
+#
+# Test the MONGO table type
+#
+CREATE TABLE t1 (Document varchar(1024) JPATH='*')
+ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants CONNECTION='mongodb://localhost:27017' LRECL=1024
+OPTION_LIST='Driver=C,Version=0' DATA_CHARSET=utf8;
+SELECT * from t1 limit 3;
+Document
+{"_id":{"$oid":"58ada47de5a51ddfcd5ed51c"},"address":{"building":"1007","coord":[-73.8560769999999991,40.8484470000000002],"street":"Morris Park Ave","zipcode":"10462"},"borough":"Bronx","cuisine":"Bakery","grades":[{"date":{"$date":1393804800000},"grade":"A","score":2},{"date":{"$date":1378857600000},"grade":"A","score":6},{"date":{"$date":1358985600000},"grade":"A","score":10},{"date":{"$date":1322006400000},"grade":"A","score":9},{"date":{"$date":1299715200000},"grade":"B","score":14}],"name":"Morris Park Bake Shop","restaurant_id":"30075445"}
+{"_id":{"$oid":"58ada47de5a51ddfcd5ed51d"},"address":{"building":"469","coord":[-73.9617039999999974,40.6629420000000010],"street":"Flatbush Avenue","zipcode":"11225"},"borough":"Brooklyn","cuisine":"Hamburgers","grades":[{"date":{"$date":1419897600000},"grade":"A","score":8},{"date":{"$date":1404172800000},"grade":"B","score":23},{"date":{"$date":1367280000000},"grade":"A","score":12},{"date":{"$date":1336435200000},"grade":"A","score":12}],"name":"Wendy'S","restaurant_id":"30112340"}
+{"_id":{"$oid":"58ada47de5a51ddfcd5ed51e"},"address":{"building":"351","coord":[-73.9851355999999925,40.7676919000000026],"street":"West 57 Street","zipcode":"10019"},"borough":"Manhattan","cuisine":"Irish","grades":[{"date":{"$date":1409961600000},"grade":"A","score":2},{"date":{"$date":1374451200000},"grade":"A","score":11},{"date":{"$date":1343692800000},"grade":"A","score":12},{"date":{"$date":1325116800000},"grade":"A","score":12}],"name":"Dj Reynolds Pub And Restaurant","restaurant_id":"30191841"}
+DROP TABLE t1;
+#
+# Test catfunc
+#
+CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants CATFUNC=columns
+OPTION_LIST='Depth=1,Driver=C,Version=0' DATA_CHARSET=utf8 CONNECTION='mongodb://localhost:27017' LRECL=1024;
+SELECT * from t1;
+Column_Name Data_Type Type_Name Column_Size Buffer_Length Decimal_Digits Nullable Jpath
+_id 1 CHAR 24 24 0 0 _id
+address_building 1 CHAR 10 10 0 0 address.building
+address_coord 1 CHAR 1024 1024 0 1 address.coord
+address_street 1 CHAR 38 38 0 0 address.street
+address_zipcode 1 CHAR 5 5 0 0 address.zipcode
+borough 1 CHAR 13 13 0 0
+cuisine 1 CHAR 64 64 0 0
+grades_date 1 CHAR 1024 1024 0 1 grades.0.date
+grades_grade 1 CHAR 14 14 0 1 grades.0.grade
+grades_score 7 INTEGER 2 2 0 1 grades.0.score
+name 1 CHAR 98 98 0 0
+restaurant_id 1 CHAR 8 8 0 0
+DROP TABLE t1;
+#
+# Explicit columns
+#
+CREATE TABLE t1 (
+_id VARCHAR(24) NOT NULL,
+name VARCHAR(255) NOT NULL,
+cuisine VARCHAR(255) NOT NULL,
+borough VARCHAR(255) NOT NULL,
+restaurant_id VARCHAR(255) NOT NULL)
+ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants
+CONNECTION='mongodb://localhost:27017' LRECL=1024 DATA_CHARSET=utf8
+OPTION_LIST='Driver=C,Version=0';
+SELECT * FROM t1 LIMIT 10;
+_id name cuisine borough restaurant_id
+58ada47de5a51ddfcd5ed51c Morris Park Bake Shop Bakery Bronx 30075445
+58ada47de5a51ddfcd5ed51d Wendy'S Hamburgers Brooklyn 30112340
+58ada47de5a51ddfcd5ed51e Dj Reynolds Pub And Restaurant Irish Manhattan 30191841
+58ada47de5a51ddfcd5ed51f Riviera Caterer American Brooklyn 40356018
+58ada47de5a51ddfcd5ed520 Tov Kosher Kitchen Jewish/Kosher Queens 40356068
+58ada47de5a51ddfcd5ed521 Brunos On The Boulevard American Queens 40356151
+58ada47de5a51ddfcd5ed522 Kosher Island Jewish/Kosher Staten Island 40356442
+58ada47de5a51ddfcd5ed523 Wilken'S Fine Food Delicatessen Brooklyn 40356483
+58ada47de5a51ddfcd5ed524 Regina Caterers American Brooklyn 40356649
+58ada47de5a51ddfcd5ed525 Taste The Tropics Ice Cream Ice Cream, Gelato, Yogurt, Ices Brooklyn 40356731
+DROP TABLE t1;
+#
+# Test discovery
+#
+CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants
+OPTION_LIST='Depth=1,Driver=C,Version=0' CONNECTION='mongodb://localhost:27017' LRECL=1024 DATA_CHARSET=utf8;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `_id` char(24) NOT NULL `JPATH`='_id',
+ `address_building` char(10) NOT NULL `JPATH`='address.building',
+ `address_coord` varchar(1024) DEFAULT NULL `JPATH`='address.coord',
+ `address_street` char(38) NOT NULL `JPATH`='address.street',
+ `address_zipcode` char(5) NOT NULL `JPATH`='address.zipcode',
+ `borough` char(13) NOT NULL,
+ `cuisine` char(64) NOT NULL,
+ `grades_date` varchar(1024) DEFAULT NULL `JPATH`='grades.0.date',
+ `grades_grade` char(14) DEFAULT NULL `JPATH`='grades.0.grade',
+ `grades_score` int(2) DEFAULT NULL `JPATH`='grades.0.score',
+ `name` char(98) NOT NULL,
+ `restaurant_id` char(8) NOT NULL
+) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='mongodb://localhost:27017' `TABLE_TYPE`='BSON' `TABNAME`='restaurants' `OPTION_LIST`='Depth=1,Driver=C,Version=0' `DATA_CHARSET`='utf8' `LRECL`=1024
+SELECT * FROM t1 LIMIT 5;
+_id address_building address_coord address_street address_zipcode borough cuisine grades_date grades_grade grades_score name restaurant_id
+58ada47de5a51ddfcd5ed51c 1007 -73.8560769999999991, 40.8484470000000002 Morris Park Ave 10462 Bronx Bakery 1393804800 A 2 Morris Park Bake Shop 30075445
+58ada47de5a51ddfcd5ed51d 469 -73.9617039999999974, 40.6629420000000010 Flatbush Avenue 11225 Brooklyn Hamburgers 1419897600 A 8 Wendy'S 30112340
+58ada47de5a51ddfcd5ed51e 351 -73.9851355999999925, 40.7676919000000026 West 57 Street 10019 Manhattan Irish 1409961600 A 2 Dj Reynolds Pub And Restaurant 30191841
+58ada47de5a51ddfcd5ed51f 2780 -73.9824199999999905, 40.5795049999999975 Stillwell Avenue 11224 Brooklyn American 1402358400 A 5 Riviera Caterer 40356018
+58ada47de5a51ddfcd5ed520 97-22 -73.8601151999999956, 40.7311739000000017 63 Road 11374 Queens Jewish/Kosher 1416787200 Z 20 Tov Kosher Kitchen 40356068
+DROP TABLE t1;
+#
+# Dropping a column
+#
+CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants DATA_CHARSET=utf8
+COLIST='{"projection":{"grades":0}}' OPTION_LIST='Driver=C,Version=0,level=0' CONNECTION='mongodb://localhost:27017' LRECL=1024;
+SELECT * FROM t1 LIMIT 10;
+_id address borough cuisine name restaurant_id
+58ada47de5a51ddfcd5ed51c 1007 (-73.8560769999999991, 40.8484470000000002) Morris Park Ave 10462 Bronx Bakery Morris Park Bake Shop 30075445
+58ada47de5a51ddfcd5ed51d 469 (-73.9617039999999974, 40.6629420000000010) Flatbush Avenue 11225 Brooklyn Hamburgers Wendy'S 30112340
+58ada47de5a51ddfcd5ed51e 351 (-73.9851355999999925, 40.7676919000000026) West 57 Street 10019 Manhattan Irish Dj Reynolds Pub And Restaurant 30191841
+58ada47de5a51ddfcd5ed51f 2780 (-73.9824199999999905, 40.5795049999999975) Stillwell Avenue 11224 Brooklyn American Riviera Caterer 40356018
+58ada47de5a51ddfcd5ed520 97-22 (-73.8601151999999956, 40.7311739000000017) 63 Road 11374 Queens Jewish/Kosher Tov Kosher Kitchen 40356068
+58ada47de5a51ddfcd5ed521 8825 (-73.8803826999999984, 40.7643124000000014) Astoria Boulevard 11369 Queens American Brunos On The Boulevard 40356151
+58ada47de5a51ddfcd5ed522 2206 (-74.1377286000000026, 40.6119571999999991) Victory Boulevard 10314 Staten Island Jewish/Kosher Kosher Island 40356442
+58ada47de5a51ddfcd5ed523 7114 (-73.9068505999999985, 40.6199033999999983) Avenue U 11234 Brooklyn Delicatessen Wilken'S Fine Food 40356483
+58ada47de5a51ddfcd5ed524 6409 (-74.0052889999999906, 40.6288860000000014) 11 Avenue 11219 Brooklyn American Regina Caterers 40356649
+58ada47de5a51ddfcd5ed525 1839 (-73.9482608999999940, 40.6408271000000028) Nostrand Avenue 11226 Brooklyn Ice Cream, Gelato, Yogurt, Ices Taste The Tropics Ice Cream 40356731
+DROP TABLE t1;
+#
+# Specifying Jpath
+#
+CREATE TABLE t1 (
+_id VARCHAR(24) NOT NULL,
+name VARCHAR(64) NOT NULL,
+cuisine CHAR(200) NOT NULL,
+borough CHAR(16) NOT NULL,
+street VARCHAR(65) JPATH='address.street',
+building CHAR(16) JPATH='address.building',
+zipcode CHAR(5) JPATH='address.zipcode',
+grade CHAR(1) JPATH='grades.0.grade',
+score INT(4) NOT NULL JPATH='grades.0.score',
+`date` DATE JPATH='grades.0.date',
+restaurant_id VARCHAR(255) NOT NULL)
+ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='restaurants' DATA_CHARSET=utf8
+OPTION_LIST='Driver=C,Version=0' CONNECTION='mongodb://localhost:27017' LRECL=1024;
+SELECT * FROM t1 LIMIT 1;
+_id 58ada47de5a51ddfcd5ed51c
+name Morris Park Bake Shop
+cuisine Bakery
+borough Bronx
+street Morris Park Ave
+building 1007
+zipcode 10462
+grade A
+score 2
+date 2014-03-03
+restaurant_id 30075445
+SELECT name, street, score, date FROM t1 LIMIT 5;
+name street score date
+Morris Park Bake Shop Morris Park Ave 2 2014-03-03
+Wendy'S Flatbush Avenue 8 2014-12-30
+Dj Reynolds Pub And Restaurant West 57 Street 2 2014-09-06
+Riviera Caterer Stillwell Avenue 5 2014-06-10
+Tov Kosher Kitchen 63 Road 20 2014-11-24
+SELECT name, cuisine, borough FROM t1 WHERE grade = 'A' LIMIT 10;
+name cuisine borough
+Morris Park Bake Shop Bakery Bronx
+Wendy'S Hamburgers Brooklyn
+Dj Reynolds Pub And Restaurant Irish Manhattan
+Riviera Caterer American Brooklyn
+Kosher Island Jewish/Kosher Staten Island
+Wilken'S Fine Food Delicatessen Brooklyn
+Regina Caterers American Brooklyn
+Taste The Tropics Ice Cream Ice Cream, Gelato, Yogurt, Ices Brooklyn
+Wild Asia American Bronx
+C & C Catering Service American Brooklyn
+SELECT COUNT(*) FROM t1 WHERE grade = 'A';
+COUNT(*)
+20687
+SELECT * FROM t1 WHERE cuisine = 'English';
+_id name cuisine borough street building zipcode grade score date restaurant_id
+58ada47de5a51ddfcd5ed83d Tea And Sympathy English Manhattan Greenwich Avenue 108 10011 A 8 2014-10-23 40391531
+58ada47de5a51ddfcd5ed85c Tartine English Manhattan West 11 Street 253 10014 A 11 2014-08-14 40392496
+58ada47de5a51ddfcd5ee1f3 The Park Slope Chipshop English Brooklyn 5 Avenue 383 11215 B 17 2014-09-29 40816202
+58ada47de5a51ddfcd5ee7e4 Pound And Pence English Manhattan Liberty Street 55 10005 A 7 2014-02-11 41022701
+58ada47de5a51ddfcd5ee999 Chip Shop English Brooklyn Atlantic Avenue 129 11201 A 9 2014-10-08 41076583
+58ada47ee5a51ddfcd5efe3f The Breslin Bar & Dining Room English Manhattan West 29 Street 16 10001 A 13 2014-06-09 41443706
+58ada47ee5a51ddfcd5efe99 Highlands Restaurant English Manhattan West 10 Street 150 10014 A 12 2014-10-22 41448559
+58ada47ee5a51ddfcd5f0413 The Fat Radish English Manhattan Orchard Street 17 10002 A 12 2014-07-26 41513545
+58ada47ee5a51ddfcd5f0777 Jones Wood Foundry English Manhattan East 76 Street 401 10021 A 12 2014-12-03 41557377
+58ada47ee5a51ddfcd5f0ea2 Whitehall English Manhattan Greenwich Avenue 19 10014 Z 15 2015-01-16 41625263
+58ada47ee5a51ddfcd5f1004 The Churchill Tavern English Manhattan East 28 Street 45 10016 A 13 2014-08-27 41633327
+58ada47ee5a51ddfcd5f13d5 The Monro English Brooklyn 5 Avenue 481 11215 A 7 2014-06-03 41660253
+58ada47ee5a51ddfcd5f1454 The Cock & Bull English Manhattan West 45 Street 23 10036 A 7 2014-08-07 41664704
+58ada47ee5a51ddfcd5f176e Dear Bushwick English Brooklyn Wilson Avenue 41 11237 A 12 2014-12-27 41690534
+58ada47ee5a51ddfcd5f1e91 Snowdonia Pub English Queens 32 Street 34-55 11106 A 12 2014-10-28 50000290
+58ada47ee5a51ddfcd5f2ddc Oscar'S Place English Manhattan Hudson Street 466 10014 A 10 2014-08-18 50011097
+SELECT * FROM t1 WHERE score = building;
+_id name cuisine borough street building zipcode grade score date restaurant_id
+DROP TABLE t1;
+#
+# Specifying Filter
+#
+CREATE TABLE t1 (
+_id CHAR(24) NOT NULL,
+name CHAR(64) NOT NULL,
+borough CHAR(16) NOT NULL,
+restaurant_id CHAR(8) NOT NULL)
+ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants DATA_CHARSET=utf8
+FILTER='{"cuisine":"French","borough":{"$ne":"Manhattan"}}'
+OPTION_LIST='Driver=C,Version=0' CONNECTION='mongodb://localhost:27017' LRECL=1024;
+SELECT name FROM t1 WHERE borough = 'Queens';
+name
+La Baraka Restaurant
+Air France Lounge
+Tournesol
+Winegasm
+Cafe Henri
+Bistro 33
+Domaine Wine Bar
+Cafe Triskell
+Cannelle Patisserie
+La Vie
+Dirty Pierres Bistro
+Fresca La Crepe
+Bliss 46 Bistro
+Bear
+Cuisine By Claudette
+Paris Baguette
+The Baroness Bar
+Francis Cafe
+Madame Sou Sou
+Crepe 'N' Tearia
+Aperitif Bayside Llc
+DROP TABLE t1;
+#
+# Testing pipeline
+#
+CREATE TABLE t1 (
+name VARCHAR(64) NOT NULL,
+borough CHAR(16) NOT NULL,
+date DATETIME NOT NULL,
+grade CHAR(1) NOT NULL,
+score INT(4) NOT NULL)
+ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='restaurants' DATA_CHARSET=utf8
+COLIST='{"pipeline":[{"$match":{"cuisine":"French"}},{"$unwind":"$grades"},{"$project":{"_id":0,"name":1,"borough":1,"date":"$grades.date","grade":"$grades.grade","score":"$grades.score"}}]}'
+OPTION_LIST='Driver=C,Version=0,Pipeline=1' CONNECTION='mongodb://localhost:27017' LRECL=1024;
+SELECT * FROM t1 LIMIT 10;
+name borough date grade score
+Tout Va Bien Manhattan 2014-11-10 01:00:00 B 15
+Tout Va Bien Manhattan 2014-04-03 02:00:00 A 13
+Tout Va Bien Manhattan 2013-07-17 02:00:00 C 36
+Tout Va Bien Manhattan 2013-02-06 01:00:00 B 22
+Tout Va Bien Manhattan 2012-07-16 02:00:00 C 36
+Tout Va Bien Manhattan 2012-03-08 01:00:00 C 7
+La Grenouille Manhattan 2014-04-09 02:00:00 A 10
+La Grenouille Manhattan 2013-03-05 01:00:00 A 9
+La Grenouille Manhattan 2012-02-02 01:00:00 A 13
+Le Perigord Manhattan 2014-07-14 02:00:00 B 14
+SELECT name, grade, score, date FROM t1 WHERE borough = 'Bronx';
+name grade score date
+Bistro Sk A 10 2014-11-21 01:00:00
+Bistro Sk A 12 2014-02-19 01:00:00
+Bistro Sk B 18 2013-06-12 02:00:00
+DROP TABLE t1;
+#
+# try level 2 discovery
+#
+CREATE TABLE t1
+ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants
+FILTER='{"cuisine":"French","borough":{"$ne":"Manhattan"}}'
+COLIST='{"projection":{"cuisine":0}}' CONNECTION='mongodb://localhost:27017' LRECL=1024
+OPTION_LIST='Driver=C,level=2,version=0';
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `_id` char(24) NOT NULL `JPATH`='_id',
+ `address_building` char(10) NOT NULL `JPATH`='address.building',
+ `address_coord` double(21,16) DEFAULT NULL `JPATH`='address.coord.0',
+ `address_street` char(38) NOT NULL `JPATH`='address.street',
+ `address_zipcode` char(5) NOT NULL `JPATH`='address.zipcode',
+ `borough` char(13) NOT NULL,
+ `grades_date` bigint(13) DEFAULT NULL `JPATH`='grades.0.date',
+ `grades_grade` char(14) DEFAULT NULL `JPATH`='grades.0.grade',
+ `grades_score` int(2) DEFAULT NULL `JPATH`='grades.0.score',
+ `name` char(98) NOT NULL,
+ `restaurant_id` char(8) NOT NULL
+) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='mongodb://localhost:27017' `TABLE_TYPE`='BSON' `TABNAME`='restaurants' `COLIST`='{"projection":{"cuisine":0}}' `FILTER`='{"cuisine":"French","borough":{"$ne":"Manhattan"}}' `OPTION_LIST`='Driver=C,level=2,version=0' `LRECL`=1024
+SELECT name, borough, address_street, grades_score AS score FROM t1 WHERE grades_grade = 'B';
+name borough address_street score
+Le Gamin Brooklyn Vanderbilt Avenue 24
+Bistro 33 Queens Ditmars Boulevard 15
+Dirty Pierres Bistro Queens Station Square 22
+Santos Anne Brooklyn Union Avenue 26
+Le Paddock Brooklyn Prospect Avenue 17
+La Crepe Et La Vie Brooklyn Foster Avenue 24
+Francis Cafe Queens Ditmars Boulevard 19
+DROP TABLE t1;
+#
+# try CRUD operations
+#
+false
+CREATE TABLE t1 (_id INT(4) NOT NULL, msg CHAR(64))
+ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='testcoll'
+OPTION_LIST='Driver=C,Version=0' CONNECTION='mongodb://localhost:27017' LRECL=1024;
+DELETE FROM t1;
+INSERT INTO t1 VALUES(0,NULL),(1,'One'),(2,'Two'),(3,'Three');
+SELECT * FROM t1;
+_id msg
+0 NULL
+1 One
+2 Two
+3 Three
+UPDATE t1 SET msg = 'Deux' WHERE _id = 2;
+DELETE FROM t1 WHERE msg IS NULL;
+SELECT * FROM t1;
+_id msg
+1 One
+2 Deux
+3 Three
+DELETE FROM t1;
+DROP TABLE t1;
+true
+#
+# List states whose population is equal to or greater than 10 million
+#
+false
+CREATE TABLE t1 (
+_id char(5) NOT NULL,
+city char(16) NOT NULL,
+loc_0 double(12,6) NOT NULL `JPATH`='loc.0',
+loc_1 char(12) NOT NULL `JPATH`='loc.1',
+pop int(11) NOT NULL,
+state char(2) NOT NULL)
+ENGINE=CONNECT CONNECTION='mongodb://localhost:27017' TABLE_TYPE=BSON TABNAME='cities'
+OPTION_LIST='Driver=C,Version=0' CONNECTION='mongodb://localhost:27017' LRECL=1024 DATA_CHARSET='utf8';
+# Using SQL for grouping
+SELECT state, sum(pop) AS totalPop FROM t1 GROUP BY state HAVING totalPop >= 10000000 ORDER BY totalPop DESC;
+state totalPop
+CA 29754890
+NY 17990402
+TX 16984601
+FL 12686644
+PA 11881643
+IL 11427576
+OH 10846517
+DROP TABLE t1;
+# Using a pipeline for grouping
+CREATE TABLE t1 (_id CHAR(2) NOT NULL, totalPop INT(11) NOT NULL)
+ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='cities' DATA_CHARSET=utf8
+COLIST='{"pipeline":[{"$group":{"_id":"$state","totalPop":{"$sum":"$pop"}}},{"$match":{"totalPop":{"$gte":10000000}}},{"$sort":{"totalPop":-1}}]}'
+OPTION_LIST='Driver=C,Version=0,Pipeline=1' CONNECTION='mongodb://localhost:27017' LRECL=1024;
+SELECT * FROM t1;
+_id totalPop
+CA 29754890
+NY 17990402
+TX 16984601
+FL 12686644
+PA 11881643
+IL 11427576
+OH 10846517
+DROP TABLE t1;
+true
+#
+# Test making array
+#
+CREATE TABLE t1 (
+_id int(4) NOT NULL,
+item CHAR(8) NOT NULL,
+prices_0 INT(6) JPATH='prices.0',
+prices_1 INT(6) JPATH='prices.1',
+prices_2 INT(6) JPATH='prices.2',
+prices_3 INT(6) JPATH='prices.3',
+prices_4 INT(6) JPATH='prices.4')
+ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='testcoll' DATA_CHARSET=utf8
+OPTION_LIST='Driver=C,Version=0' CONNECTION='mongodb://localhost:27017' LRECL=1024;
+INSERT INTO t1 VALUES
+(1,'journal',87,45,63,12,78),
+(2,'notebook',123,456,789,NULL,NULL),
+(3,'paper',5,7,3,8,NULL),
+(4,'planner',25,71,NULL,44,27),
+(5,'postcard',5,7,3,8,NULL);
+SELECT * FROM t1;
+_id item prices_0 prices_1 prices_2 prices_3 prices_4
+1 journal 87 45 63 12 78
+2 notebook 123 456 789 NULL NULL
+3 paper 5 7 3 8 NULL
+4 planner 25 71 44 27 NULL
+5 postcard 5 7 3 8 NULL
+DROP TABLE t1;
+#
+# Test array aggregation
+#
+CREATE TABLE t1
+ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='testcoll'
+COLIST='{"pipeline":[{"$project":{"_id":0,"item":1,"total":{"$sum":"$prices"},"average":{"$avg":"$prices"}}}]}'
+OPTION_LIST='Driver=C,Version=0,Pipeline=YES' CONNECTION='mongodb://localhost:27017' LRECL=1024;
+SELECT * FROM t1;
+item total average
+journal 285 57.00
+notebook 1368 456.00
+paper 23 5.75
+planner 167 41.75
+postcard 23 5.75
+DROP TABLE t1;
+true
+set connect_enable_mongo=0;
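Apart from the driver selection itself, the three BSON-over-MongoDB result files in this diff (the Version=2 Java results above, bson_java_3.result and bson_mongo_c.result) record the same scenario; what they capture are the option and rendering differences between drivers. The Java tables use LRECL=4096 with Driver=Java,Version=2 or Version=3, the C driver uses LRECL=1024 with Driver=C,Version=0; the Java drivers accept a bare projection in COLIST while the C driver needs it wrapped in a "projection" object; and, judging from the recorded output, the Version=2 Java driver renders grade dates as 1970-based values where the other two show the actual dates. The projection difference in one sketch, both statements restated from the files above:

-- Java drivers (Version=2 and Version=3 results)
CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants DATA_CHARSET=utf8
COLIST='{"grades":0}' OPTION_LIST='Driver=Java,Version=3,level=0'
CONNECTION='mongodb://localhost:27017' LRECL=4096;

-- C driver (bson_mongo_c.result)
CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants DATA_CHARSET=utf8
COLIST='{"projection":{"grades":0}}' OPTION_LIST='Driver=C,Version=0,level=0'
CONNECTION='mongodb://localhost:27017' LRECL=1024;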
diff --git a/storage/connect/mysql-test/connect/r/bson_udf.result b/storage/connect/mysql-test/connect/r/bson_udf.result
new file mode 100644
index 00000000000..fef55f7d3d9
--- /dev/null
+++ b/storage/connect/mysql-test/connect/r/bson_udf.result
@@ -0,0 +1,685 @@
+CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=VIR BLOCK_SIZE=5;
+#
+# Test UDFs with constant arguments
+#
+SELECT BsonValue(56, 3.1416, 'foo', NULL);
+ERROR HY000: Can't initialize function 'bsonvalue'; Cannot accept more than 1 argument
+SELECT BsonValue(3.1416);
+BsonValue(3.1416)
+3.1416
+SELECT BsonValue(-80);
+BsonValue(-80)
+-80
+SELECT BsonValue('foo');
+BsonValue('foo')
+foo
+SELECT BsonValue(9223372036854775807);
+BsonValue(9223372036854775807)
+9223372036854775807
+SELECT BsonValue(NULL);
+BsonValue(NULL)
+null
+SELECT BsonValue(TRUE);
+BsonValue(TRUE)
+true
+SELECT BsonValue(FALSE);
+BsonValue(FALSE)
+false
+SELECT BsonValue();
+BsonValue()
+null
+SELECT BsonValue('[11, 22, 33]' json_) FROM t1;
+BsonValue('[11, 22, 33]' json_)
+[11,22,33]
+[11,22,33]
+[11,22,33]
+[11,22,33]
+[11,22,33]
+SELECT Bson_Make_Array();
+Bson_Make_Array()
+[]
+SELECT Bson_Make_Array(56, 3.1416, 'My name is "Foo"', NULL);
+Bson_Make_Array(56, 3.1416, 'My name is "Foo"', NULL)
+[56,3.1416,"My name is \"Foo\"",null]
+SELECT Bson_Make_Array(Bson_Make_Array(56, 3.1416, 'foo'), TRUE);
+Bson_Make_Array(Bson_Make_Array(56, 3.1416, 'foo'), TRUE)
+[[56,3.1416,"foo"],true]
+SELECT Bson_Array_Add(Bson_Make_Array(56, 3.1416, 'foo', NULL)) Array;
+ERROR HY000: Can't initialize function 'bson_array_add'; This function must have at least 2 arguments
+SELECT Bson_Array_Add(Bson_Make_Array(56, 3.1416, 'foo', NULL), 'One more') Array;
+Array
+[56,3.1416,"foo",null,"One more"]
+SELECT Bson_Array_Add(BsonValue('one value'), 'One more');
+Bson_Array_Add(BsonValue('one value'), 'One more')
+["one value","One more"]
+SELECT Bson_Array_Add('one value', 'One more');
+Bson_Array_Add('one value', 'One more')
+["one value","One more"]
+SELECT Bson_Array_Add('one value' json_, 'One more');
+Bson_Array_Add('one value' json_, 'One more')
+["one value","One more"]
+SELECT Bson_Array_Add(5 json_, 'One more');
+Bson_Array_Add(5 json_, 'One more')
+[5,"One more"]
+SELECT Bson_Array_Add('[5,3,8,7,9]' json_, 4, 0);
+Bson_Array_Add('[5,3,8,7,9]' json_, 4, 0)
+[4,5,3,8,7,9]
+SELECT Bson_Array_Add('[5,3,8,7,9]' json_, 4, 2) Array;
+Array
+[5,3,4,8,7,9]
+SELECT Bson_Array_Add('[5,3,8,7,9]' json_, 4, 9);
+Bson_Array_Add('[5,3,8,7,9]' json_, 4, 9)
+[5,3,8,7,9,4]
+SELECT Bson_Array_Add(Bson_Make_Array(1, 2, Bson_Make_Array(11, 22)), '[2]', 33, 1);
+Bson_Array_Add(Bson_Make_Array(1, 2, Bson_Make_Array(11, 22)), '[2]', 33, 1)
+[1,2,[11,22],"[2]"]
+SELECT Bson_Array_Add(Bson_Make_Array(1, 2, Bson_Make_Array(11, 22)), 33, '[2]', 1);
+Bson_Array_Add(Bson_Make_Array(1, 2, Bson_Make_Array(11, 22)), 33, '[2]', 1)
+[1,2,[11,33,22]]
+SELECT Bson_Array_Add(Bson_Make_Array(1, 2, Bson_Make_Array(11, 22)), 33, 1, '[2]');
+Bson_Array_Add(Bson_Make_Array(1, 2, Bson_Make_Array(11, 22)), 33, 1, '[2]')
+[1,2,[11,33,22]]
+SELECT Bson_Array_Add_Values(Bson_Make_Array(56, 3.1416, 'machin', NULL), 'One more', 'Two more') Array;
+Array
+[56,3.1416,"machin",null,"One more","Two more"]
+SELECT Bson_Array_Add_Values(Bson_Make_Array(56, 3.1416, 'machin'), 'One more', 'Two more') Array FROM t1;
+Array
+[56,3.1416,"machin","One more","Two more"]
+[56,3.1416,"machin","One more","Two more"]
+[56,3.1416,"machin","One more","Two more"]
+[56,3.1416,"machin","One more","Two more"]
+[56,3.1416,"machin","One more","Two more"]
+SELECT Bson_Array_Add_Values(Bson_Make_Array(56, 3.1416, 'machin'), n) Array FROM t1;
+Array
+[56,3.1416,"machin",1]
+[56,3.1416,"machin",2]
+[56,3.1416,"machin",3]
+[56,3.1416,"machin",4]
+[56,3.1416,"machin",5]
+SELECT Bson_Array_Add_Values(Bson_Make_Array(n, 3.1416, 'machin'), n) Array FROM t1;
+Array
+[1,3.1416,"machin",1]
+[2,3.1416,"machin",2]
+[3,3.1416,"machin",3]
+[4,3.1416,"machin",4]
+[5,3.1416,"machin",5]
+SELECT Bson_Array_Add_Values('[56]', 3.1416, 'machin') Array;
+Array
+[56,3.1416,"machin"]
+SELECT Bson_Array_Delete(Bson_Make_Array(56, 3.1416, 'My name is "Foo"', NULL), 0);
+Bson_Array_Delete(Bson_Make_Array(56, 3.1416, 'My name is "Foo"', NULL), 0)
+[3.1416,"My name is \"Foo\"",null]
+SELECT Bson_Array_Delete(Bson_Make_Object(56, 3.1416, 'My name is Foo', NULL), 2);
+Bson_Array_Delete(Bson_Make_Object(56, 3.1416, 'My name is Foo', NULL), 2)
+{"56":56,"3.1416":3.1416,"My name is Foo":"My name is Foo","NULL":null}
+Warnings:
+Warning 1105 First argument target is not an array
+SELECT Bson_Array_Delete(Bson_Make_Array(56, 3.1416, 'My name is "Foo"', NULL), '2');
+Bson_Array_Delete(Bson_Make_Array(56, 3.1416, 'My name is "Foo"', NULL), '2')
+[56,3.1416,"My name is \"Foo\"",null]
+Warnings:
+Warning 1105 Missing or null array index
+SELECT Bson_Array_Delete(Bson_Make_Array(56, 3.1416, 'My name is "Foo"', NULL), '2', 2);
+Bson_Array_Delete(Bson_Make_Array(56, 3.1416, 'My name is "Foo"', NULL), '2', 2)
+[56,3.1416,"My name is \"Foo\"",null]
+Warnings:
+Warning 1105 First argument target is not an array
+/* WARNING VOID */
+#
+SELECT Bson_Make_Object(56, 3.1416, 'foo', NULL);
+Bson_Make_Object(56, 3.1416, 'foo', NULL)
+{"56":56,"3.1416":3.1416,"foo":"foo","NULL":null}
+SELECT Bson_Make_Object(56 qty, 3.1416 price, 'foo' truc, NULL garanty);
+Bson_Make_Object(56 qty, 3.1416 price, 'foo' truc, NULL garanty)
+{"qty":56,"price":3.1416,"truc":"foo","garanty":null}
+SELECT Bson_Make_Object();
+Bson_Make_Object()
+{}
+SELECT Bson_Make_Object(Bson_Make_Array(56, 3.1416, 'foo'), NULL);
+Bson_Make_Object(Bson_Make_Array(56, 3.1416, 'foo'), NULL)
+{"Make_Array(56, 3.1416, 'foo')":[56,3.1416,"foo"],"NULL":null}
+SELECT Bson_Make_Array(Bson_Make_Object(56 "qty", 3.1416 "price", 'foo') ,NULL);
+Bson_Make_Array(Bson_Make_Object(56 "qty", 3.1416 "price", 'foo') ,NULL)
+[{"qty":56,"price":3.1416,"foo":"foo"},null]
+SELECT Bson_Object_Key('qty', 56, 'price', 3.1416, 'truc', 'machin', 'garanty', NULL);
+Bson_Object_Key('qty', 56, 'price', 3.1416, 'truc', 'machin', 'garanty', NULL)
+{"qty":56,"price":3.1416,"truc":"machin","garanty":null}
+SELECT Bson_Object_Key('qty', 56, 'price', 3.1416, 'truc', 'machin', 'garanty');
+ERROR HY000: Can't initialize function 'bson_object_key'; This function must have an even number of arguments
+SELECT Bson_Object_Add(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty), 'blue' color);
+Bson_Object_Add(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty), 'blue' color)
+{"qty":56,"price":3.1416,"truc":"machin","garanty":null,"color":"blue"}
+SELECT Bson_Object_Add(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty), 45.99 price);
+Bson_Object_Add(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty), 45.99 price)
+{"qty":56,"price":45.99,"truc":"machin","garanty":null}
+SELECT Bson_Object_Add(Bson_File('notexist.json'), 'cheese' item, '[1]', 1);
+Bson_Object_Add(Bson_File('notexist.json'), 'cheese' item, '[1]', 1)
+NULL
+Warnings:
+Warning 1105 Error 2 opening notexist.json
+Warning 1105 No sub-item at '[1]'
+SELECT Bson_Object_Delete(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty), 'truc');
+Bson_Object_Delete(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty), 'truc')
+{"qty":56,"price":3.1416,"garanty":null}
+SELECT Bson_Object_Delete(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty), 'chose');
+Bson_Object_Delete(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty), 'chose')
+{"qty":56,"price":3.1416,"truc":"machin","garanty":null}
+SELECT Bson_Object_List(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty)) "Key List";
+Key List
+["qty","price","truc","garanty"]
+SELECT Bson_Object_List('{"qty":56, "price":3.1416, "truc":"machin", "garanty":null}') "Key List";
+Key List
+["qty","price","truc","garanty"]
+SELECT Bson_Object_Values('{"One":1,"Two":2,"Three":3}') "Value List";
+Value List
+[1,2,3]
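Two conventions run through the constant-argument results above and are worth spelling out: the alias attached to an argument becomes the key when building an object, and an argument whose alias is json_ is parsed as BSON instead of being wrapped as a plain scalar. Three of the calls above, restated with their recorded results as comments:

SELECT Bson_Make_Object(56 qty, 3.1416 price, 'foo' truc, NULL garanty);
-- {"qty":56,"price":3.1416,"truc":"foo","garanty":null}   (aliases supply the keys)
SELECT Bson_Array_Add('one value', 'One more');
-- ["one value","One more"]                                (plain string kept as a scalar)
SELECT Bson_Array_Add('[5,3,8,7,9]' json_, 4, 0);
-- [4,5,3,8,7,9]                                           (json_ alias: string parsed as an array, 4 inserted at position 0)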
+#
+# Test UDFs with column arguments
+#
+SELECT Bsonset_Def_Prec(2);
+Bsonset_Def_Prec(2)
+2
+CREATE TABLE t2
+(
+ISBN CHAR(15),
+LANG CHAR(2),
+SUBJECT CHAR(32),
+AUTHOR CHAR(64),
+TITLE CHAR(32),
+TRANSLATION CHAR(32),
+TRANSLATOR CHAR(80),
+PUBLISHER CHAR(32),
+DATEPUB int(4)
+) ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='biblio.json';
+SELECT Bson_Make_Array(AUTHOR, TITLE, DATEPUB) FROM t2;
+Bson_Make_Array(AUTHOR, TITLE, DATEPUB)
+[" Jean-Christophe Bernadac, François Knab","Construire une application XML",1999]
+["William J. Pardi","XML en Action",1999]
+SELECT Bson_Make_Object(AUTHOR, TITLE, DATEPUB) FROM t2;
+Bson_Make_Object(AUTHOR, TITLE, DATEPUB)
+{"AUTHOR":" Jean-Christophe Bernadac, François Knab","TITLE":"Construire une application XML","DATEPUB":1999}
+{"AUTHOR":"William J. Pardi","TITLE":"XML en Action","DATEPUB":1999}
+SELECT Bson_Array_Grp(TITLE, DATEPUB) FROM t2;
+ERROR HY000: Can't initialize function 'bson_array_grp'; This function can only accept 1 argument
+SELECT Bson_Array_Grp(TITLE) FROM t2;
+Bson_Array_Grp(TITLE)
+["Construire une application XML","XML en Action"]
+CREATE TABLE t3 (
+SERIALNO CHAR(5) NOT NULL,
+NAME VARCHAR(12) NOT NULL FLAG=6,
+SEX SMALLINT(1) NOT NULL,
+TITLE VARCHAR(15) NOT NULL FLAG=20,
+MANAGER CHAR(5) DEFAULT NULL,
+DEPARTMENT CHAr(4) NOT NULL FLAG=41,
+SECRETARY CHAR(5) DEFAULT NULL FLAG=46,
+SALARY DOUBLE(8,2) NOT NULL FLAG=52
+) ENGINE=CONNECT TABLE_TYPE=FIX BLOCK_SIZE=8 FILE_NAME='employee.dat' ENDING=1;
+SELECT Bson_Make_Object(SERIALNO, NAME, TITLE, SALARY) FROM t3 WHERE NAME = 'MERCHANT';
+Bson_Make_Object(SERIALNO, NAME, TITLE, SALARY)
+{"SERIALNO":"78943","NAME":"MERCHANT","TITLE":"SALESMAN","SALARY":8700.00}
+SELECT DEPARTMENT, Bson_Array_Grp(NAME) FROM t3 GROUP BY DEPARTMENT;
+DEPARTMENT Bson_Array_Grp(NAME)
+0021 ["STRONG","SHORTSIGHT"]
+0318 ["BANCROFT","PLUMHEAD","HONEY","TONGHO","WALTER","SHRINKY","WERTHER","MERCHANT","WHEELFOR"]
+0319 ["BULLOZER","QUINN","BROWNY","KITTY","MONAPENNY","MARTIN","FUNNIGUY","BUGHAPPY","FODDERMAN","MESSIFUL"]
+2452 ["BIGHEAD","ORELLY","BIGHORN","SMITH","CHERRY"]
+Warnings:
+Warning 1105 Result truncated to json_grp_size values
+SELECT BsonSet_Grp_Size(30);
+BsonSet_Grp_Size(30)
+30
+SELECT Bson_Make_Object(title, Bson_Array_Grp(name) `json_names`) from t3 GROUP BY title;
+Bson_Make_Object(title, Bson_Array_Grp(name) `json_names`)
+{"title":"ADMINISTRATOR","names":["GOOSEPEN","FUNNIGUY","SHRINKY"]}
+{"title":"DIRECTOR","names":["QUINN","WERTHER","STRONG"]}
+{"title":"ENGINEER","names":["BROWNY","ORELLY","MARTIN","TONGHO","WALTER","SMITH"]}
+{"title":"PROGRAMMER","names":["BUGHAPPY"]}
+{"title":"SALESMAN","names":["WHEELFOR","MERCHANT","BULLOZER","BANCROFT","FODDERMAN"]}
+{"title":"SCIENTIST","names":["BIGHEAD","BIGHORN"]}
+{"title":"SECRETARY","names":["MESSIFUL","HONEY","SHORTSIGHT","CHERRY","MONAPENNY"]}
+{"title":"TYPIST","names":["KITTY","PLUMHEAD"]}
+SELECT Bson_Make_Array(DEPARTMENT, Bson_Array_Grp(NAME)) FROM t3 GROUP BY DEPARTMENT;
+Bson_Make_Array(DEPARTMENT, Bson_Array_Grp(NAME))
+["0021",["STRONG","SHORTSIGHT"]]
+["0318",["BANCROFT","PLUMHEAD","HONEY","TONGHO","WALTER","SHRINKY","WERTHER","MERCHANT","WHEELFOR"]]
+["0319",["BULLOZER","QUINN","BROWNY","KITTY","MONAPENNY","MARTIN","FUNNIGUY","BUGHAPPY","FODDERMAN","MESSIFUL","GOOSEPEN"]]
+["2452",["BIGHEAD","ORELLY","BIGHORN","SMITH","CHERRY"]]
+SELECT Bson_Make_Object(DEPARTMENT, Bson_Array_Grp(NAME) json_NAMES) FROM t3 GROUP BY DEPARTMENT;
+Bson_Make_Object(DEPARTMENT, Bson_Array_Grp(NAME) json_NAMES)
+{"DEPARTMENT":"0021","NAMES":["STRONG","SHORTSIGHT"]}
+{"DEPARTMENT":"0318","NAMES":["BANCROFT","PLUMHEAD","HONEY","TONGHO","WALTER","SHRINKY","WERTHER","MERCHANT","WHEELFOR"]}
+{"DEPARTMENT":"0319","NAMES":["BULLOZER","QUINN","BROWNY","KITTY","MONAPENNY","MARTIN","FUNNIGUY","BUGHAPPY","FODDERMAN","MESSIFUL","GOOSEPEN"]}
+{"DEPARTMENT":"2452","NAMES":["BIGHEAD","ORELLY","BIGHORN","SMITH","CHERRY"]}
+SELECT Bson_Make_Object(DEPARTMENT, Bson_Array_Grp(Bson_Make_Object(SERIALNO, NAME, TITLE, SALARY)) json_EMPLOYES) FROM t3 GROUP BY DEPARTMENT;
+Bson_Make_Object(DEPARTMENT, Bson_Array_Grp(Bson_Make_Object(SERIALNO, NAME, TITLE, SALARY)) json_EMPLOYES)
+{"DEPARTMENT":"0021","EMPLOYES":[{"SERIALNO":"87777","NAME":"STRONG","TITLE":"DIRECTOR","SALARY":23000.00},{"SERIALNO":"22222","NAME":"SHORTSIGHT","TITLE":"SECRETARY","SALARY":5500.00}]}
+{"DEPARTMENT":"0318","EMPLOYES":[{"SERIALNO":"74200","NAME":"BANCROFT","TITLE":"SALESMAN","SALARY":9600.00},{"SERIALNO":"24888","NAME":"PLUMHEAD","TITLE":"TYPIST","SALARY":2800.00},{"SERIALNO":"27845","NAME":"HONEY","TITLE":"SECRETARY","SALARY":4900.00},{"SERIALNO":"73452","NAME":"TONGHO","TITLE":"ENGINEER","SALARY":6800.00},{"SERIALNO":"74234","NAME":"WALTER","TITLE":"ENGINEER","SALARY":7400.00},{"SERIALNO":"77777","NAME":"SHRINKY","TITLE":"ADMINISTRATOR","SALARY":7500.00},{"SERIALNO":"70012","NAME":"WERTHER","TITLE":"DIRECTOR","SALARY":14500.00},{"SERIALNO":"78943","NAME":"MERCHANT","TITLE":"SALESMAN","SALARY":8700.00},{"SERIALNO":"73111","NAME":"WHEELFOR","TITLE":"SALESMAN","SALARY":10030.00}]}
+{"DEPARTMENT":"0319","EMPLOYES":[{"SERIALNO":"76543","NAME":"BULLOZER","TITLE":"SALESMAN","SALARY":14800.00},{"SERIALNO":"40567","NAME":"QUINN","TITLE":"DIRECTOR","SALARY":14000.00},{"SERIALNO":"00137","NAME":"BROWNY","TITLE":"ENGINEER","SALARY":10500.00},{"SERIALNO":"12345","NAME":"KITTY","TITLE":"TYPIST","SALARY":3000.45},{"SERIALNO":"33333","NAME":"MONAPENNY","TITLE":"SECRETARY","SALARY":3800.00},{"SERIALNO":"00023","NAME":"MARTIN","TITLE":"ENGINEER","SALARY":10000.00},{"SERIALNO":"07654","NAME":"FUNNIGUY","TITLE":"ADMINISTRATOR","SALARY":8500.00},{"SERIALNO":"45678","NAME":"BUGHAPPY","TITLE":"PROGRAMMER","SALARY":8500.00},{"SERIALNO":"56789","NAME":"FODDERMAN","TITLE":"SALESMAN","SALARY":7000.00},{"SERIALNO":"55555","NAME":"MESSIFUL","TITLE":"SECRETARY","SALARY":5000.50},{"SERIALNO":"98765","NAME":"GOOSEPEN","TITLE":"ADMINISTRATOR","SALARY":4700.00}]}
+{"DEPARTMENT":"2452","EMPLOYES":[{"SERIALNO":"34567","NAME":"BIGHEAD","TITLE":"SCIENTIST","SALARY":8000.00},{"SERIALNO":"31416","NAME":"ORELLY","TITLE":"ENGINEER","SALARY":13400.00},{"SERIALNO":"36666","NAME":"BIGHORN","TITLE":"SCIENTIST","SALARY":11000.00},{"SERIALNO":"02345","NAME":"SMITH","TITLE":"ENGINEER","SALARY":9000.00},{"SERIALNO":"11111","NAME":"CHERRY","TITLE":"SECRETARY","SALARY":4500.00}]}
+SELECT Bson_Make_Object(DEPARTMENT, TITLE, Bson_Array_Grp(Bson_Make_Object(SERIALNO, NAME, SALARY)) json_EMPLOYES) FROM t3 GROUP BY DEPARTMENT, TITLE;
+Bson_Make_Object(DEPARTMENT, TITLE, Bson_Array_Grp(Bson_Make_Object(SERIALNO, NAME, SALARY)) json_EMPLOYES)
+{"DEPARTMENT":"0021","TITLE":"DIRECTOR","EMPLOYES":[{"SERIALNO":"87777","NAME":"STRONG","SALARY":23000.00}]}
+{"DEPARTMENT":"0021","TITLE":"SECRETARY","EMPLOYES":[{"SERIALNO":"22222","NAME":"SHORTSIGHT","SALARY":5500.00}]}
+{"DEPARTMENT":"0318","TITLE":"ADMINISTRATOR","EMPLOYES":[{"SERIALNO":"77777","NAME":"SHRINKY","SALARY":7500.00}]}
+{"DEPARTMENT":"0318","TITLE":"DIRECTOR","EMPLOYES":[{"SERIALNO":"70012","NAME":"WERTHER","SALARY":14500.00}]}
+{"DEPARTMENT":"0318","TITLE":"ENGINEER","EMPLOYES":[{"SERIALNO":"73452","NAME":"TONGHO","SALARY":6800.00},{"SERIALNO":"74234","NAME":"WALTER","SALARY":7400.00}]}
+{"DEPARTMENT":"0318","TITLE":"SALESMAN","EMPLOYES":[{"SERIALNO":"74200","NAME":"BANCROFT","SALARY":9600.00},{"SERIALNO":"78943","NAME":"MERCHANT","SALARY":8700.00},{"SERIALNO":"73111","NAME":"WHEELFOR","SALARY":10030.00}]}
+{"DEPARTMENT":"0318","TITLE":"SECRETARY","EMPLOYES":[{"SERIALNO":"27845","NAME":"HONEY","SALARY":4900.00}]}
+{"DEPARTMENT":"0318","TITLE":"TYPIST","EMPLOYES":[{"SERIALNO":"24888","NAME":"PLUMHEAD","SALARY":2800.00}]}
+{"DEPARTMENT":"0319","TITLE":"ADMINISTRATOR","EMPLOYES":[{"SERIALNO":"98765","NAME":"GOOSEPEN","SALARY":4700.00},{"SERIALNO":"07654","NAME":"FUNNIGUY","SALARY":8500.00}]}
+{"DEPARTMENT":"0319","TITLE":"DIRECTOR","EMPLOYES":[{"SERIALNO":"40567","NAME":"QUINN","SALARY":14000.00}]}
+{"DEPARTMENT":"0319","TITLE":"ENGINEER","EMPLOYES":[{"SERIALNO":"00023","NAME":"MARTIN","SALARY":10000.00},{"SERIALNO":"00137","NAME":"BROWNY","SALARY":10500.00}]}
+{"DEPARTMENT":"0319","TITLE":"PROGRAMMER","EMPLOYES":[{"SERIALNO":"45678","NAME":"BUGHAPPY","SALARY":8500.00}]}
+{"DEPARTMENT":"0319","TITLE":"SALESMAN","EMPLOYES":[{"SERIALNO":"76543","NAME":"BULLOZER","SALARY":14800.00},{"SERIALNO":"56789","NAME":"FODDERMAN","SALARY":7000.00}]}
+{"DEPARTMENT":"0319","TITLE":"SECRETARY","EMPLOYES":[{"SERIALNO":"33333","NAME":"MONAPENNY","SALARY":3800.00},{"SERIALNO":"55555","NAME":"MESSIFUL","SALARY":5000.50}]}
+{"DEPARTMENT":"0319","TITLE":"TYPIST","EMPLOYES":[{"SERIALNO":"12345","NAME":"KITTY","SALARY":3000.45}]}
+{"DEPARTMENT":"2452","TITLE":"ENGINEER","EMPLOYES":[{"SERIALNO":"31416","NAME":"ORELLY","SALARY":13400.00},{"SERIALNO":"02345","NAME":"SMITH","SALARY":9000.00}]}
+{"DEPARTMENT":"2452","TITLE":"SCIENTIST","EMPLOYES":[{"SERIALNO":"34567","NAME":"BIGHEAD","SALARY":8000.00},{"SERIALNO":"36666","NAME":"BIGHORN","SALARY":11000.00}]}
+{"DEPARTMENT":"2452","TITLE":"SECRETARY","EMPLOYES":[{"SERIALNO":"11111","NAME":"CHERRY","SALARY":4500.00}]}
+SELECT Bson_Object_Grp(SALARY) FROM t3;
+ERROR HY000: Can't initialize function 'bson_object_grp'; This function requires 2 arguments (key, value)
+SELECT Bson_Object_Grp(NAME, SALARY) FROM t3;
+Bson_Object_Grp(NAME, SALARY)
+{"BANCROFT":9600.00,"SMITH":9000.00,"MERCHANT":8700.00,"FUNNIGUY":8500.00,"BUGHAPPY":8500.00,"BIGHEAD":8000.00,"SHRINKY":7500.00,"WALTER":7400.00,"FODDERMAN":7000.00,"TONGHO":6800.00,"SHORTSIGHT":5500.00,"MESSIFUL":5000.50,"HONEY":4900.00,"GOOSEPEN":4700.00,"CHERRY":4500.00,"MONAPENNY":3800.00,"KITTY":3000.45,"PLUMHEAD":2800.00,"STRONG":23000.00,"BULLOZER":14800.00,"WERTHER":14500.00,"QUINN":14000.00,"ORELLY":13400.00,"BIGHORN":11000.00,"BROWNY":10500.00,"WHEELFOR":10030.00,"MARTIN":10000.00}
+SELECT Bson_Make_Object(DEPARTMENT, Bson_Object_Grp(NAME, SALARY) "Json_SALARIES") FROM t3 GROUP BY DEPARTMENT;
+Bson_Make_Object(DEPARTMENT, Bson_Object_Grp(NAME, SALARY) "Json_SALARIES")
+{"DEPARTMENT":"0021","SALARIES":{"STRONG":23000.00,"SHORTSIGHT":5500.00}}
+{"DEPARTMENT":"0318","SALARIES":{"BANCROFT":9600.00,"PLUMHEAD":2800.00,"HONEY":4900.00,"TONGHO":6800.00,"WALTER":7400.00,"SHRINKY":7500.00,"WERTHER":14500.00,"MERCHANT":8700.00,"WHEELFOR":10030.00}}
+{"DEPARTMENT":"0319","SALARIES":{"BULLOZER":14800.00,"QUINN":14000.00,"BROWNY":10500.00,"KITTY":3000.45,"MONAPENNY":3800.00,"MARTIN":10000.00,"FUNNIGUY":8500.00,"BUGHAPPY":8500.00,"FODDERMAN":7000.00,"MESSIFUL":5000.50,"GOOSEPEN":4700.00}}
+{"DEPARTMENT":"2452","SALARIES":{"BIGHEAD":8000.00,"ORELLY":13400.00,"BIGHORN":11000.00,"SMITH":9000.00,"CHERRY":4500.00}}
+SELECT Bson_Array_Grp(NAME) FROM t3;
+Bson_Array_Grp(NAME)
+["BANCROFT","SMITH","MERCHANT","FUNNIGUY","BUGHAPPY","BIGHEAD","SHRINKY","WALTER","FODDERMAN","TONGHO","SHORTSIGHT","MESSIFUL","HONEY","GOOSEPEN","CHERRY","MONAPENNY","KITTY","PLUMHEAD","STRONG","BULLOZER","WERTHER","QUINN","ORELLY","BIGHORN","BROWNY","WHEELFOR","MARTIN"]
+SELECT Bson_Object_Key(name, title) FROM t3 WHERE DEPARTMENT = 318;
+Bson_Object_Key(name, title)
+{"BANCROFT":"SALESMAN"}
+{"MERCHANT":"SALESMAN"}
+{"SHRINKY":"ADMINISTRATOR"}
+{"WALTER":"ENGINEER"}
+{"TONGHO":"ENGINEER"}
+{"HONEY":"SECRETARY"}
+{"PLUMHEAD":"TYPIST"}
+{"WERTHER":"DIRECTOR"}
+{"WHEELFOR":"SALESMAN"}
+SELECT Bson_Object_Grp(name, title) FROM t3 WHERE DEPARTMENT = 318;
+Bson_Object_Grp(name, title)
+{"BANCROFT":"SALESMAN","MERCHANT":"SALESMAN","SHRINKY":"ADMINISTRATOR","WALTER":"ENGINEER","TONGHO":"ENGINEER","HONEY":"SECRETARY","PLUMHEAD":"TYPIST","WERTHER":"DIRECTOR","WHEELFOR":"SALESMAN"}
+#
+# Test value getting UDF's
+#
+SELECT BsonGet_String(Bson_Array_Grp(name),'[#]') FROM t3;
+BsonGet_String(Bson_Array_Grp(name),'[#]')
+27
+SELECT BsonGet_String(Bson_Array_Grp(name),'[","]') FROM t3;
+BsonGet_String(Bson_Array_Grp(name),'[","]')
+BANCROFT,SMITH,MERCHANT,FUNNIGUY,BUGHAPPY,BIGHEAD,SHRINKY,WALTER,FODDERMAN,TONGHO,SHORTSIGHT,MESSIFUL,HONEY,GOOSEPEN,CHERRY,MONAPENNY,KITTY,PLUMHEAD,STRONG,BULLOZER,WERTHER,QUINN,ORELLY,BIGHORN,BROWNY,WHEELFOR,MARTIN
+SELECT BsonGet_String(Bson_Array_Grp(name),'[>]') FROM t3;
+BsonGet_String(Bson_Array_Grp(name),'[>]')
+WHEELFOR
+SET @j1 = '[45,28,36,45,89]';
+SELECT BsonGet_String(@j1,'1');
+BsonGet_String(@j1,'1')
+28
+SELECT BsonGet_String(@j1 json_,'3');
+BsonGet_String(@j1 json_,'3')
+45
+SELECT BsonGet_String(Bson_Make_Array(45,28,36,45,89),'3');
+BsonGet_String(Bson_Make_Array(45,28,36,45,89),'3')
+45
+SELECT BsonGet_String(Bson_Make_Array(45,28,36,45,89),'["+"]') "list",'=' as "egal",BsonGet_String(Bson_Make_Array(45,28,36,45,89),'[+]') "sum";
+list egal sum
+45+28+36+45+89 = 243
+SELECT BsonGet_String(Bson_Make_Array(Bson_Make_Array(45,28),Bson_Make_Array(36,45,89)),'1.0');
+BsonGet_String(Bson_Make_Array(Bson_Make_Array(45,28),Bson_Make_Array(36,45,89)),'1.0')
+36
+SELECT BsonGet_String(Bson_Make_Array(Bson_Make_Array(45,28),Bson_Make_Array(36,45,89)),'1.*');
+BsonGet_String(Bson_Make_Array(Bson_Make_Array(45,28),Bson_Make_Array(36,45,89)),'1.*')
+[36,45,89]
+SELECT BsonGet_String(Bson_Make_Object(56 qty,3.1416 price,'machin' truc, NULL garanty),'truc');
+BsonGet_String(Bson_Make_Object(56 qty,3.1416 price,'machin' truc, NULL garanty),'truc')
+machin
+SET @j2 = '{"qty":56,"price":3.141600,"truc":"machin","garanty":null}';
+SELECT BsonGet_String(@j2 json_,'truc');
+BsonGet_String(@j2 json_,'truc')
+machin
+SELECT BsonGet_String(@j2,'truc');
+BsonGet_String(@j2,'truc')
+machin
+SELECT BsonGet_String(@j2,'chose');
+BsonGet_String(@j2,'chose')
+NULL
+SELECT BsonGet_String(NULL json_, NULL);
+BsonGet_String(NULL json_, NULL)
+NULL
+Warnings:
+Warning 1105
+/* NULL WARNING */
+SELECT department, BsonGet_String(Bson_Make_Object(department, Bson_Array_Grp(salary) "Json_salaries"),'salaries.[+]') Sumsal FROM t3 GROUP BY department;
+department Sumsal
+0021 28500.00
+0318 72230.00
+0319 89800.95
+2452 45900.00
+SELECT BsonGet_Int(@j1, '4');
+BsonGet_Int(@j1, '4')
+89
+SELECT BsonGet_Int(@j1, '[#]');
+BsonGet_Int(@j1, '[#]')
+5
+SELECT BsonGet_Int(@j1, '[+]');
+BsonGet_Int(@j1, '[+]')
+243
+SELECT BsonGet_Int(@j1 json_, '3');
+BsonGet_Int(@j1 json_, '3')
+45
+SELECT BsonGet_Int(Bson_Make_Array(45,28,36,45,89), '3');
+BsonGet_Int(Bson_Make_Array(45,28,36,45,89), '3')
+45
+SELECT BsonGet_Int(Bson_Make_Array(45,28,36,45,89), '["+"]');
+BsonGet_Int(Bson_Make_Array(45,28,36,45,89), '["+"]')
+45
+SELECT BsonGet_Int(Bson_Make_Array(45,28,36,45,89), '[+]');
+BsonGet_Int(Bson_Make_Array(45,28,36,45,89), '[+]')
+243
+SELECT BsonGet_Int(Bson_Make_Array(Bson_Make_Array(45,28), Bson_Make_Array(36,45,89)), '1.0');
+BsonGet_Int(Bson_Make_Array(Bson_Make_Array(45,28), Bson_Make_Array(36,45,89)), '1.0')
+36
+SELECT BsonGet_Int(Bson_Make_Array(Bson_Make_Array(45,28), Bson_Make_Array(36,45,89)), '0.1');
+BsonGet_Int(Bson_Make_Array(Bson_Make_Array(45,28), Bson_Make_Array(36,45,89)), '0.1')
+28
+SELECT BsonGet_Int(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty), 'qty');
+BsonGet_Int(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty), 'qty')
+56
+SELECT BsonGet_Int(@j2 json_, 'price');
+BsonGet_Int(@j2 json_, 'price')
+3
+SELECT BsonGet_Int(@j2, 'qty');
+BsonGet_Int(@j2, 'qty')
+56
+SELECT BsonGet_Int('{"qty":56,"price":3.141600,"truc":"machin","garanty":null}', 'chose');
+BsonGet_Int('{"qty":56,"price":3.141600,"truc":"machin","garanty":null}', 'chose')
+NULL
+SELECT BsonGet_Int(BsonGet_String(Bson_Make_Array(Bson_Make_Array(45,28),Bson_Make_Array(36,45,89)), '1.*'), '[+]') sum;
+sum
+170
+SELECT department, BsonGet_Int(Bson_Make_Object(department, Bson_Array_Grp(salary) "Json_salaries"), 'salaries.[+]') Sumsal FROM t3 GROUP BY department;
+department Sumsal
+0021 28500
+0318 72230
+0319 89800
+2452 45900
+SELECT BsonGet_Real(@j1, '2');
+BsonGet_Real(@j1, '2')
+36.000000000000000
+SELECT BsonGet_Real(@j1 json_, '3', 2);
+BsonGet_Real(@j1 json_, '3', 2)
+45.00
+SELECT BsonGet_Real(Bson_Make_Array(45,28,36,45,89), '3');
+BsonGet_Real(Bson_Make_Array(45,28,36,45,89), '3')
+45.000000000000000
+SELECT BsonGet_Real(Bson_Make_Array(45,28,36,45,89), '["+"]');
+BsonGet_Real(Bson_Make_Array(45,28,36,45,89), '["+"]')
+45.000000000000000
+SELECT BsonGet_Real(Bson_Make_Array(45,28,36,45,89), '[+]');
+BsonGet_Real(Bson_Make_Array(45,28,36,45,89), '[+]')
+243.000000000000000
+SELECT BsonGet_Real(Bson_Make_Array(45,28,36,45,89), '[!]');
+BsonGet_Real(Bson_Make_Array(45,28,36,45,89), '[!]')
+48.600000000000000
+SELECT BsonGet_Real(Bson_Make_Array(Bson_Make_Array(45,28), Bson_Make_Array(36,45,89)), '1.0');
+BsonGet_Real(Bson_Make_Array(Bson_Make_Array(45,28), Bson_Make_Array(36,45,89)), '1.0')
+36.000000000000000
+SELECT BsonGet_Real(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty), 'price');
+BsonGet_Real(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty), 'price')
+3.141600000000000
+SELECT BsonGet_Real('{"qty":56,"price":3.141600,"truc":"machin","garanty":null}' json_, 'qty');
+BsonGet_Real('{"qty":56,"price":3.141600,"truc":"machin","garanty":null}' json_, 'qty')
+56.000000000000000
+SELECT BsonGet_Real('{"qty":56,"price":3.141600,"truc":"machin","garanty":null}', 'price');
+BsonGet_Real('{"qty":56,"price":3.141600,"truc":"machin","garanty":null}', 'price')
+3.141600000000000
+SELECT BsonGet_Real('{"qty":56,"price":3.141600,"truc":"machin","garanty":null}', 'price', 4);
+BsonGet_Real('{"qty":56,"price":3.141600,"truc":"machin","garanty":null}', 'price', 4)
+3.1416
+SELECT BsonGet_Real('{"qty":56,"price":3.141600,"truc":"machin","garanty":null}', 'chose');
+BsonGet_Real('{"qty":56,"price":3.141600,"truc":"machin","garanty":null}', 'chose')
+NULL
+SELECT department, BsonGet_Real(Bson_Make_Object(department, Bson_Array_Grp(salary) "Json_salaries"),'salaries.[+]') Sumsal FROM t3 GROUP BY department;
+department Sumsal
+0021 28500.000000000000000
+0318 72230.000000000000000
+0319 89800.950000000000000
+2452 45900.000000000000000
+#
+# Documentation examples
+#
+SELECT
+BsonGet_Int(Bson_Make_Array(45,28,36,45,89), '4') "Rank",
+BsonGet_Int(Bson_Make_Array(45,28,36,45,89), '[#]') "Number",
+BsonGet_String(Bson_Make_Array(45,28,36,45,89), '[","]') "Concat",
+BsonGet_Int(Bson_Make_Array(45,28,36,45,89), '[+]') "Sum",
+BsonGet_Real(Bson_Make_Array(45,28,36,45,89), '[!]', 2) "Avg";
+Rank Number Concat Sum Avg
+89 5 45,28,36,45,89 243 48.60
+SELECT
+BsonGet_String('{"qty":7,"price":29.50,"garanty":null}', 'price') "String",
+BsonGet_Int('{"qty":7,"price":29.50,"garanty":null}', 'price') "Int",
+BsonGet_Real('{"qty":7,"price":29.50,"garanty":null}', 'price') "Real";
+String Int Real
+29.50 29 29.500000000000000
+SELECT BsonGet_Real('{"qty":7,"price":29.50,"garanty":null}', 'price', 3) "Real";
+Real
+29.500
+#
+# Testing Locate
+#
+SELECT BsonLocate(Bson_Make_Object(56 qty,3.1416 price,'machin' truc, NULL garanty),'machin');
+BsonLocate(Bson_Make_Object(56 qty,3.1416 price,'machin' truc, NULL garanty),'machin')
+$.truc
+SELECT BsonLocate(Bson_Make_Object(56 qty,3.1416 price,'machin' truc, NULL garanty),56);
+BsonLocate(Bson_Make_Object(56 qty,3.1416 price,'machin' truc, NULL garanty),56)
+$.qty
+SELECT BsonLocate(Bson_Make_Object(56 qty,3.1416 price,'machin' truc, NULL garanty),3.1416);
+BsonLocate(Bson_Make_Object(56 qty,3.1416 price,'machin' truc, NULL garanty),3.1416)
+$.price
+SELECT BsonLocate(Bson_Make_Object(56 qty,3.1416 price,'machin' truc, NULL garanty),'chose');
+BsonLocate(Bson_Make_Object(56 qty,3.1416 price,'machin' truc, NULL garanty),'chose')
+NULL
+SELECT BsonLocate('{"AUTHORS":[{"FN":"Jules", "LN":"Verne"}, {"FN":"Jack", "LN":"London"}]}' json_, 'Jack') Path;
+Path
+$.AUTHORS[1].FN
+SELECT BsonLocate('{"AUTHORS":[{"FN":"Jules", "LN":"Verne"}, {"FN":"Jack", "LN":"London"}]}' json_, 'jack' ci) Path;
+Path
+$.AUTHORS[1].FN
+SELECT BsonLocate('{"AUTHORS":[{"FN":"Jules", "LN":"Verne"}, {"FN":"Jack", "LN":"London"}]}' json_, '{"FN":"Jack", "LN":"London"}' json_) Path;
+Path
+$.AUTHORS[1]
+SELECT BsonLocate('{"AUTHORS":[{"FN":"Jules", "LN":"Verne"}, {"FN":"Jack", "LN":"London"}]}' json_, '{"FN":"jack", "LN":"London"}' json_) Path;
+Path
+NULL
+SELECT BsonLocate('[45,28,36,45,89]',36);
+BsonLocate('[45,28,36,45,89]',36)
+$[2]
+SELECT BsonLocate('[45,28,36,45,89]' json_,28.0);
+BsonLocate('[45,28,36,45,89]' json_,28.0)
+NULL
+SELECT Bson_Locate_All('[45,28,36,45,89]',10);
+Bson_Locate_All('[45,28,36,45,89]',10)
+[]
+SELECT Bson_Locate_All('[45,28,36,45,89]',45);
+Bson_Locate_All('[45,28,36,45,89]',45)
+["$[0]","$[3]"]
+SELECT Bson_Locate_All('[[45,28],36,45,89]',45);
+Bson_Locate_All('[[45,28],36,45,89]',45)
+["$[0][0]","$[2]"]
+SELECT Bson_Locate_All('[[45,28,45],36,45,89]',45);
+Bson_Locate_All('[[45,28,45],36,45,89]',45)
+["$[0][0]","$[0][2]","$[2]"]
+SELECT Bson_Locate_All('[[45,28,45],36,45,89]',BsonGet_Int('[3,45]','[1]'));
+Bson_Locate_All('[[45,28,45],36,45,89]',BsonGet_Int('[3,45]','[1]'))
+["$[0][0]","$[0][2]","$[2]"]
+SELECT BsonLocate('[[45,28,45],36,45,89]',45,n) from t1;
+BsonLocate('[[45,28,45],36,45,89]',45,n)
+$[0][0]
+$[0][2]
+$[2]
+NULL
+NULL
+SELECT BsonGet_String(Bson_Locate_All('[[45,28,45],36,45,89]',45),concat('[',n-1,']')) FROM t1;
+BsonGet_String(Bson_Locate_All('[[45,28,45],36,45,89]',45),concat('[',n-1,']'))
+$[0][0]
+$[0][2]
+$[2]
+NULL
+NULL
+SELECT BsonGet_String(Bson_Locate_All('[[45,28,45],36,45,89]',45),concat('[',n-1,']')) AS `Path` FROM t1 GROUP BY n HAVING `Path` IS NOT NULL;
+Path
+$[0][0]
+$[0][2]
+$[2]
+SELECT Bson_Locate_All('[45,28,[36,45,89]]',45);
+Bson_Locate_All('[45,28,[36,45,89]]',45)
+["$[0]","$[2][1]"]
+SELECT Bson_Locate_All('[[45,28],[36,45.0,89]]',BsonValue(45.0));
+Bson_Locate_All('[[45,28],[36,45.0,89]]',BsonValue(45.0))
+[]
+SELECT Bson_Locate_All('[[45,28],[36,45.0,89]]',45.0);
+Bson_Locate_All('[[45,28],[36,45.0,89]]',45.0)
+["$[1][1]"]
+SELECT BsonLocate('[[45,28],[36,45,89]]','[36,45,89]' json_);
+BsonLocate('[[45,28],[36,45,89]]','[36,45,89]' json_)
+$[1]
+SELECT BsonLocate('[[45,28],[36,45,89]]','[45,28]' json_);
+BsonLocate('[[45,28],[36,45,89]]','[45,28]' json_)
+$[0]
+SELECT Bson_Locate_All('[[45,28],[[36,45],89]]','45') "All paths";
+All paths
+[]
+SELECT Bson_Locate_All('[[45,28],[[36,45],89]]','[36,45]' json_);
+Bson_Locate_All('[[45,28],[[36,45],89]]','[36,45]' json_)
+["$[1][0]"]
+SELECT BsonGet_Int(Bson_Locate_All('[[45,28],[[36,45],89]]',45), '[#]') "Nb of occurs";
+Nb of occurs
+2
+SELECT Bson_Locate_All('[[45,28],[[36,45],89]]',45,2);
+Bson_Locate_All('[[45,28],[[36,45],89]]',45,2)
+["$[0][0]"]
+SELECT BsonGet_String(Bson_Locate_All('[45,28,36,45,89]',45),'0');
+BsonGet_String(Bson_Locate_All('[45,28,36,45,89]',45),'0')
+$[0]
+SELECT BsonLocate(Bson_File('test/biblio.json'), 'Knab');
+BsonLocate(Bson_File('test/biblio.json'), 'Knab')
+$[0].AUTHOR[1].LASTNAME
+SELECT Bson_Locate_All('test/biblio.json' jfile_, 'Knab');
+Bson_Locate_All('test/biblio.json' jfile_, 'Knab')
+["$[0].AUTHOR[1].LASTNAME"]
+#
+# Testing json files
+#
+SELECT Bfile_Make('[{"_id":5,"type":"food","item":"beer","taste":"light","price":5.65,"ratings":[5,8,9]},
+{"_id":6,"type":"car","item":"roadster","mileage":56000,"ratings":[6,9]},
+{"_id":7,"type":"food","item":"meat","origin":"argentina","ratings":[2,4]},
+{"_id":8,"type":"furniture","item":"table","size":{"W":60,"L":80,"H":40},"ratings":[5,8,7]}]', 'test/fx.json', 0) AS NewFile;
+NewFile
+test/fx.json
+SELECT Bfile_Make('test/fx.json', 1);
+Bfile_Make('test/fx.json', 1)
+test/fx.json
+SELECT Bfile_Make('test/fx.json' jfile_);
+Bfile_Make('test/fx.json' jfile_)
+test/fx.json
+SELECT Bfile_Make(Bbin_File('test/fx.json'), 0);
+Bfile_Make(Bbin_File('test/fx.json'), 0)
+test/fx.json
+SELECT Bson_File('test/fx.json', 1);
+Bson_File('test/fx.json', 1)
+[{"_id":5,"type":"food","item":"beer","taste":"light","price":5.65,"ratings":[5,8,9]},{"_id":6,"type":"car","item":"roadster","mileage":56000,"ratings":[6,9]},{"_id":7,"type":"food","item":"meat","origin":"argentina","ratings":[2,4]},{"_id":8,"type":"furniture","item":"table","size":{"W":60,"L":80,"H":40},"ratings":[5,8,7]}]
+Warnings:
+Warning 1105 File pretty format doesn't match the specified pretty value
+SELECT Bson_File('test/fx.json', 2);
+Bson_File('test/fx.json', 2)
+[{"_id":5,"type":"food","item":"beer","taste":"light","price":5.65,"ratings":[5,8,9]},{"_id":6,"type":"car","item":"roadster","mileage":56000,"ratings":[6,9]},{"_id":7,"type":"food","item":"meat","origin":"argentina","ratings":[2,4]},{"_id":8,"type":"furniture","item":"table","size":{"W":60,"L":80,"H":40},"ratings":[5,8,7]}]
+Warnings:
+Warning 1105 File pretty format doesn't match the specified pretty value
+SELECT Bson_File('test/fx.json', 0);
+Bson_File('test/fx.json', 0)
+[{"_id":5,"type":"food","item":"beer","taste":"light","price":5.65,"ratings":[5,8,9]},{"_id":6,"type":"car","item":"roadster","mileage":56000,"ratings":[6,9]},{"_id":7,"type":"food","item":"meat","origin":"argentina","ratings":[2,4]},{"_id":8,"type":"furniture","item":"table","size":{"W":60,"L":80,"H":40},"ratings":[5,8,7]}]
+SELECT Bson_File('test/fx.json', '0');
+Bson_File('test/fx.json', '0')
+{"_id":5,"type":"food","item":"beer","taste":"light","price":5.65,"ratings":[5,8,9]}
+SELECT Bson_File('test/fx.json', '[?]');
+Bson_File('test/fx.json', '[?]')
+NULL
+Warnings:
+Warning 1105 Invalid function specification ?
+SELECT BsonGet_String(Bson_File('test/fx.json'), '1.*');
+BsonGet_String(Bson_File('test/fx.json'), '1.*')
+{"_id":6,"type":"car","item":"roadster","mileage":56000,"ratings":[6,9]}
+SELECT BsonGet_String(Bson_File('test/fx.json'), '1');
+BsonGet_String(Bson_File('test/fx.json'), '1')
+6 car roadster 56000 (6, 9)
+SELECT BsonGet_Int(Bson_File('test/fx.json'), '1.mileage') AS Mileage;
+Mileage
+56000
+SELECT BsonGet_Real(Bson_File('test/fx.json'), '0.price', 2) AS Price;
+Price
+5.65
+SELECT Bson_Array_Add(Bson_File('test/fx.json', '2'), 6, 'ratings');
+Bson_Array_Add(Bson_File('test/fx.json', '2'), 6, 'ratings')
+{"_id":7,"type":"food","item":"meat","origin":"argentina","ratings":[2,4,6]}
+SELECT Bson_Array_Add(Bson_File('test/fx.json', '2'), 6, 1, 'ratings');
+Bson_Array_Add(Bson_File('test/fx.json', '2'), 6, 1, 'ratings')
+{"_id":7,"type":"food","item":"meat","origin":"argentina","ratings":[2,6,4]}
+SELECT Bson_Array_Add(Bson_File('test/fx.json', '2'), 6, 'ratings', 1);
+Bson_Array_Add(Bson_File('test/fx.json', '2'), 6, 'ratings', 1)
+{"_id":7,"type":"food","item":"meat","origin":"argentina","ratings":[2,6,4]}
+SELECT Bson_Array_Add(Bson_File('test/fx.json', '2.ratings'), 6, 0);
+Bson_Array_Add(Bson_File('test/fx.json', '2.ratings'), 6, 0)
+[6,2,4]
+SELECT Bson_Array_Delete(Bson_File('test/fx.json', '2'), 'ratings', 1);
+Bson_Array_Delete(Bson_File('test/fx.json', '2'), 'ratings', 1)
+{"_id":7,"type":"food","item":"meat","origin":"argentina","ratings":[2]}
+SELECT Bson_Object_Add(Bson_File('test/fx.json', '2'), 'france' origin);
+Bson_Object_Add(Bson_File('test/fx.json', '2'), 'france' origin)
+{"_id":7,"type":"food","item":"meat","origin":"france","ratings":[2,4]}
+SELECT Bson_Object_Add(Bson_File('test/fx.json', '2'), 70 H, 'size');
+Bson_Object_Add(Bson_File('test/fx.json', '2'), 70 H, 'size')
+{"_id":7,"type":"food","item":"meat","origin":"argentina","ratings":[2,4]}
+Warnings:
+Warning 1105 No sub-item at 'size'
+SELECT Bson_Object_Add(Bson_File('test/fx.json', '3'), 70 H, 'size');
+Bson_Object_Add(Bson_File('test/fx.json', '3'), 70 H, 'size')
+{"_id":8,"type":"furniture","item":"table","size":{"W":60,"L":80,"H":70},"ratings":[5,8,7]}
+SELECT Bson_Object_List(Bson_File('test/fx.json', '3.size'));
+Bson_Object_List(Bson_File('test/fx.json', '3.size'))
+["W","L","H"]
+#
+# Testing new functions
+#
+SELECT Bson_Item_Merge('["a","b","c"]','["d","e","f"]') as "Result";
+Result
+["a","b","c","d","e","f"]
+SELECT Bson_Item_Merge(Bson_Make_Array('a','b','c'), Bson_Make_Array('d','e','f')) as "Result";
+Result
+["a","b","c","d","e","f"]
+SELECT
+Bson_Set_Item('[1,2,3,{"quatre":4}]', 'foo', '$[1]', 5, '$[3].cinq') as "Set",
+Bson_Insert_Item('[1,2,3,{"quatre":4}]', 'foo', '$[1]', 5, '$[3].cinq') as "Insert",
+Bson_Update_Item(Bson_Make_Array(1,2,3,Bson_Object_Key('quatre',4)),'foo','$[1]',5,'$[3].cinq') "Update";
+Set Insert Update
+[1,"foo",3,{"quatre":4,"cinq":5}] [1,2,3,{"quatre":4,"cinq":5}] [1,"foo",3,{"quatre":4}]
+SELECT bson_delete_item('[1,2,3,{"quatre":4,"Deux":2}]','1','[2].Deux');
+bson_delete_item('[1,2,3,{"quatre":4,"Deux":2}]','1','[2].Deux')
+[1,3,{"quatre":4}]
+SELECT bson_delete_item('[1,2,3,{"quatre":4,"Deux":2}]','["[1]","[3].Deux"]');
+bson_delete_item('[1,2,3,{"quatre":4,"Deux":2}]','["[1]","[3].Deux"]')
+[1,3,{"quatre":4}]
+SELECT bson_delete_item('[1,2,3,{"quatre":4,"Deux":2}]','$.[3].Deux');
+bson_delete_item('[1,2,3,{"quatre":4,"Deux":2}]','$.[3].Deux')
+[1,2,3,{"quatre":4}]
+DROP TABLE t1;
+DROP TABLE t2;
+DROP TABLE t3;
+SELECT BsonSet_Grp_Size(10);
+BsonSet_Grp_Size(10)
+10
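#
# Recap sketch (not part of the patch above): the call pattern the bson_udf
# results exercise, assuming the CONNECT BSON UDFs shown above are installed
# and a table t3(DEPARTMENT, SERIALNO, NAME, TITLE, SALARY) like the one the
# test populates. Mirrors statements already present in the expected output.
#
# One BSON object per department, with the employees nested as an array:
SELECT Bson_Make_Object(DEPARTMENT,
Bson_Array_Grp(Bson_Make_Object(SERIALNO, NAME, TITLE, SALARY)) json_EMPLOYES)
FROM t3 GROUP BY DEPARTMENT;
# Special array paths seen above: '[+]' sums, '[!]' averages, '[#]' counts:
SELECT BsonGet_Int(Bson_Array_Grp(SALARY), '[#]') AS nb_rows,
BsonGet_Real(Bson_Array_Grp(SALARY), '[!]', 2) AS avg_salary
FROM t3;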
diff --git a/storage/connect/mysql-test/connect/r/jdbc_oracle.result b/storage/connect/mysql-test/connect/r/jdbc_oracle.result
index ec314c5f072..d895a9aed87 100644
--- a/storage/connect/mysql-test/connect/r/jdbc_oracle.result
+++ b/storage/connect/mysql-test/connect/r/jdbc_oracle.result
@@ -3,7 +3,7 @@ command varchar(128) not null,
number int(5) not null flag=1,
message varchar(255) flag=2)
ENGINE=CONNECT TABLE_TYPE=JDBC CONNECTION='jdbc:oracle:thin:@localhost:1521:xe'
-OPTION_LIST='User=system,Password=manager,Execsrc=1';
+OPTION_LIST='User=system,Password=Choupy01,Execsrc=1';
SELECT * FROM t2 WHERE command = 'drop table employee';
command number message
drop table employee 0 Execute: java.sql.SQLSyntaxErrorException: ORA-00942: table or view does not exist
@@ -23,14 +23,14 @@ Warnings:
Warning 1105 Affected rows
CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC CATFUNC=tables
CONNECTION='jdbc:oracle:thin:@localhost:1521:xe'
-OPTION_LIST='User=system,Password=manager';
+OPTION_LIST='User=system,Password=Choupy01';
SELECT * FROM t1 WHERE table_name='employee';
Table_Cat Table_Schema Table_Name Table_Type Remark
NULL SYSTEM EMPLOYEE TABLE NULL
DROP TABLE t1;
CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC TABNAME='EMPLOYEE' CATFUNC=columns
CONNECTION='jdbc:oracle:thin:@localhost:1521:xe'
-OPTION_LIST='User=system,Password=manager';
+OPTION_LIST='User=system,Password=Choupy01';
SELECT * FROM t1;
Table_Cat Table_Schema Table_Name Column_Name Data_Type Type_Name Column_Size Buffer_Length Decimal_Digits Radix Nullable Remarks
NULL SYSTEM EMPLOYEE ID 3 NUMBER 38 0 0 10 0 NULL
@@ -42,7 +42,7 @@ CREATE SERVER 'oracle' FOREIGN DATA WRAPPER 'oracle.jdbc.driver.OracleDriver' OP
HOST 'jdbc:oracle:thin:@localhost:1521:xe',
DATABASE 'SYSTEM',
USER 'system',
-PASSWORD 'manager',
+PASSWORD 'Choupy01',
PORT 0,
SOCKET '',
OWNER 'SYSTEM');
diff --git a/storage/connect/mysql-test/connect/r/json.result b/storage/connect/mysql-test/connect/r/json.result
index 6b6f40d2c47..dc527acd4a3 100644
--- a/storage/connect/mysql-test/connect/r/json.result
+++ b/storage/connect/mysql-test/connect/r/json.result
@@ -15,7 +15,7 @@ DATEPUB int(4)
) ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='biblio.json';
SELECT * FROM t1;
ISBN LANG SUBJECT AUTHOR TITLE TRANSLATION TRANSLATOR PUBLISHER DATEPUB
-9782212090819 fr applications Jean-Christophe Bernadac Construire une application XML NULL NULL Eyrolles Paris 1999
+9782212090819 fr applications Jean-Christophe Bernadac, François Knab Construire une application XML NULL NULL Eyrolles Paris 1999
9782840825685 fr applications William J. Pardi XML en Action adapté de l'anglais par James Guerin Microsoft Press Paris 1999
DROP TABLE t1;
#
@@ -24,15 +24,15 @@ DROP TABLE t1;
CREATE TABLE t1
(
ISBN CHAR(15),
-Language CHAR(2) FIELD_FORMAT='$.LANG',
-Subject CHAR(32) FIELD_FORMAT='$.SUBJECT',
-Authors INT(2) FIELD_FORMAT='$.AUTHOR[#]',
-Title CHAR(32) FIELD_FORMAT='$.TITLE',
-Translation CHAR(32) FIELD_FORMAT='$.TRANSLATION',
-Translator CHAR(80) FIELD_FORMAT='$.TRANSLATOR',
-Publisher CHAR(20) FIELD_FORMAT='$.PUBLISHER.NAME',
-Location CHAR(16) FIELD_FORMAT='$.PUBLISHER.PLACE',
-Year int(4) FIELD_FORMAT='$.DATEPUB'
+Language CHAR(2) JPATH='$.LANG',
+Subject CHAR(32) JPATH='$.SUBJECT',
+Authors INT(2) JPATH='$.AUTHOR[#]',
+Title CHAR(32) JPATH='$.TITLE',
+Translation CHAR(32) JPATH='$.TRANSLATION',
+Translator CHAR(80) JPATH='$.TRANSLATOR',
+Publisher CHAR(20) JPATH='$.PUBLISHER.NAME',
+Location CHAR(16) JPATH='$.PUBLISHER.PLACE',
+Year int(4) JPATH='$.DATEPUB'
)
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='biblio.json';
SELECT * FROM t1;
@@ -46,16 +46,16 @@ DROP TABLE t1;
CREATE TABLE t1
(
ISBN CHAR(15),
-Language CHAR(2) FIELD_FORMAT='$.LANG',
-Subject CHAR(32) FIELD_FORMAT='$.SUBJECT',
-AuthorFN CHAR(128) FIELD_FORMAT='$.AUTHOR[" and "].FIRSTNAME',
-AuthorLN CHAR(128) FIELD_FORMAT='$.AUTHOR[" and "].LASTNAME',
-Title CHAR(32) FIELD_FORMAT='$.TITLE',
-Translation CHAR(32) FIELD_FORMAT='$.TRANSLATION',
-Translator CHAR(80) FIELD_FORMAT='$.TRANSLATOR',
-Publisher CHAR(20) FIELD_FORMAT='$.PUBLISHER.NAME',
-Location CHAR(16) FIELD_FORMAT='$.PUBLISHER.PLACE',
-Year int(4) FIELD_FORMAT='$.DATEPUB'
+Language CHAR(2) JPATH='$.LANG',
+Subject CHAR(32) JPATH='$.SUBJECT',
+AuthorFN CHAR(128) JPATH='$.AUTHOR[" and "].FIRSTNAME',
+AuthorLN CHAR(128) JPATH='$.AUTHOR[" and "].LASTNAME',
+Title CHAR(32) JPATH='$.TITLE',
+Translation CHAR(32) JPATH='$.TRANSLATION',
+Translator CHAR(80) JPATH='$.TRANSLATOR',
+Publisher CHAR(20) JPATH='$.PUBLISHER.NAME',
+Location CHAR(16) JPATH='$.PUBLISHER.PLACE',
+Year int(4) JPATH='$.DATEPUB'
)
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='biblio.json';
SELECT * FROM t1;
@@ -69,16 +69,16 @@ DROP TABLE t1;
CREATE TABLE t1
(
ISBN CHAR(15),
-Language CHAR(2) FIELD_FORMAT='$.LANG',
-Subject CHAR(32) FIELD_FORMAT='$.SUBJECT',
-AuthorFN CHAR(128) FIELD_FORMAT='$.AUTHOR[*].FIRSTNAME',
-AuthorLN CHAR(128) FIELD_FORMAT='$.AUTHOR[*].LASTNAME',
-Title CHAR(32) FIELD_FORMAT='$.TITLE',
-Translation CHAR(32) FIELD_FORMAT='$.TRANSLATION',
-Translator CHAR(80) FIELD_FORMAT='$.TRANSLATOR',
-Publisher CHAR(20) FIELD_FORMAT='$.PUBLISHER.NAME',
-Location CHAR(16) FIELD_FORMAT='$.PUBLISHER.PLACE',
-Year int(4) FIELD_FORMAT='$.DATEPUB'
+Language CHAR(2) JPATH='$.LANG',
+Subject CHAR(32) JPATH='$.SUBJECT',
+AuthorFN CHAR(128) JPATH='$.AUTHOR[*].FIRSTNAME',
+AuthorLN CHAR(128) JPATH='$.AUTHOR[*].LASTNAME',
+Title CHAR(32) JPATH='$.TITLE',
+Translation CHAR(32) JPATH='$.TRANSLATION',
+Translator CHAR(80) JPATH='$.TRANSLATOR',
+Publisher CHAR(20) JPATH='$.PUBLISHER.NAME',
+Location CHAR(16) JPATH='$.PUBLISHER.PLACE',
+Year int(4) JPATH='$.DATEPUB'
)
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='biblio.json';
SELECT * FROM t1;
@@ -176,17 +176,17 @@ DROP TABLE t1;
CREATE TABLE t1
(
ISBN CHAR(15) NOT NULL,
-Language CHAR(2) FIELD_FORMAT='$.LANG',
-Subject CHAR(32) FIELD_FORMAT='$.SUBJECT',
-AuthorFN CHAR(128) FIELD_FORMAT='$.AUTHOR[*].FIRSTNAME',
-AuthorLN CHAR(128) FIELD_FORMAT='$.AUTHOR[*].LASTNAME',
-Title CHAR(32) FIELD_FORMAT='$.TITLE',
-Translation CHAR(32) FIELD_FORMAT='$.TRANSLATED.PREFIX',
-TranslatorFN CHAR(80) FIELD_FORMAT='$.TRANSLATED.TRANSLATOR.FIRSTNAME',
-TranslatorLN CHAR(80) FIELD_FORMAT='$.TRANSLATED.TRANSLATOR.LASTNAME',
-Publisher CHAR(20) FIELD_FORMAT='$.PUBLISHER.NAME',
-Location CHAR(16) FIELD_FORMAT='$.PUBLISHER.PLACE',
-Year int(4) FIELD_FORMAT='$.DATEPUB',
+Language CHAR(2) JPATH='$.LANG',
+Subject CHAR(32) JPATH='$.SUBJECT',
+AuthorFN CHAR(128) JPATH='$.AUTHOR[*].FIRSTNAME',
+AuthorLN CHAR(128) JPATH='$.AUTHOR[*].LASTNAME',
+Title CHAR(32) JPATH='$.TITLE',
+Translation CHAR(32) JPATH='$.TRANSLATED.PREFIX',
+TranslatorFN CHAR(80) JPATH='$.TRANSLATED.TRANSLATOR.FIRSTNAME',
+TranslatorLN CHAR(80) JPATH='$.TRANSLATED.TRANSLATOR.LASTNAME',
+Publisher CHAR(20) JPATH='$.PUBLISHER.NAME',
+Location CHAR(16) JPATH='$.PUBLISHER.PLACE',
+Year int(4) JPATH='$.DATEPUB',
INDEX IX(ISBN)
)
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='bib0.json' LRECL=320 OPTION_LIST='Pretty=0';
@@ -209,9 +209,9 @@ DROP TABLE t1;
#
CREATE TABLE t1 (
WHO CHAR(12),
-WEEK INT(2) FIELD_FORMAT='$.WEEK[*].NUMBER',
-WHAT CHAR(32) FIELD_FORMAT='$.WEEK[].EXPENSE["+"].WHAT',
-AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.WEEK[].EXPENSE[+].AMOUNT')
+WEEK INT(2) JPATH='$.WEEK[*].NUMBER',
+WHAT CHAR(32) JPATH='$.WEEK[].EXPENSE["+"].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.WEEK[].EXPENSE[+].AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.json';
SELECT * FROM t1;
WHO WEEK WHAT AMOUNT
@@ -230,9 +230,9 @@ DROP TABLE t1;
#
CREATE TABLE t1 (
WHO CHAR(12),
-WEEK INT(2) FIELD_FORMAT='$.WEEK[*].NUMBER',
-WHAT CHAR(32) FIELD_FORMAT='$.WEEK[*].EXPENSE[*].WHAT',
-AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.WEEK[*].EXPENSE[*].AMOUNT')
+WEEK INT(2) JPATH='$.WEEK[*].NUMBER',
+WHAT CHAR(32) JPATH='$.WEEK[*].EXPENSE[*].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.WEEK[*].EXPENSE[*].AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.json';
SELECT * FROM t1;
WHO WEEK WHAT AMOUNT
@@ -266,14 +266,14 @@ DROP TABLE t1;
#
CREATE TABLE t1 (
WHO CHAR(12) NOT NULL,
-WEEKS CHAR(12) NOT NULL FIELD_FORMAT='$.WEEK[", "].NUMBER',
-SUMS CHAR(64) NOT NULL FIELD_FORMAT='$.WEEK["+"].EXPENSE[+].AMOUNT',
-SUM DOUBLE(8,2) NOT NULL FIELD_FORMAT='$.WEEK[+].EXPENSE[+].AMOUNT',
-AVGS CHAR(64) NOT NULL FIELD_FORMAT='$.WEEK["+"].EXPENSE[!].AMOUNT',
-SUMAVG DOUBLE(8,2) NOT NULL FIELD_FORMAT='$.WEEK[+].EXPENSE[!].AMOUNT',
-AVGSUM DOUBLE(8,2) NOT NULL FIELD_FORMAT='$.WEEK[!].EXPENSE[+].AMOUNT',
-AVGAVG DOUBLE(8,2) NOT NULL FIELD_FORMAT='$.WEEK[!].EXPENSE[!].AMOUNT',
-AVERAGE DOUBLE(8,2) NOT NULL FIELD_FORMAT='$.WEEK[!].EXPENSE[*].AMOUNT')
+WEEKS CHAR(12) NOT NULL JPATH='$.WEEK[", "].NUMBER',
+SUMS CHAR(64) NOT NULL JPATH='$.WEEK["+"].EXPENSE[+].AMOUNT',
+SUM DOUBLE(8,2) NOT NULL JPATH='$.WEEK[+].EXPENSE[+].AMOUNT',
+AVGS CHAR(64) NOT NULL JPATH='$.WEEK["+"].EXPENSE[!].AMOUNT',
+SUMAVG DOUBLE(8,2) NOT NULL JPATH='$.WEEK[+].EXPENSE[!].AMOUNT',
+AVGSUM DOUBLE(8,2) NOT NULL JPATH='$.WEEK[!].EXPENSE[+].AMOUNT',
+AVGAVG DOUBLE(8,2) NOT NULL JPATH='$.WEEK[!].EXPENSE[!].AMOUNT',
+AVERAGE DOUBLE(8,2) NOT NULL JPATH='$.WEEK[!].EXPENSE[*].AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.json';
SELECT * FROM t1;
WHO WEEKS SUMS SUM AVGS SUMAVG AVGSUM AVGAVG AVERAGE
@@ -286,9 +286,9 @@ DROP TABLE t1;
#
CREATE TABLE t2 (
WHO CHAR(12),
-WEEK INT(2) FIELD_FORMAT='$.WEEK[0].NUMBER',
-WHAT CHAR(32) FIELD_FORMAT='$.WEEK[0].EXPENSE[*].WHAT',
-AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.WEEK[0].EXPENSE[*].AMOUNT')
+WEEK INT(2) JPATH='$.WEEK[0].NUMBER',
+WHAT CHAR(32) JPATH='$.WEEK[0].EXPENSE[*].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.WEEK[0].EXPENSE[*].AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.json';
SELECT * FROM t2;
WHO WEEK WHAT AMOUNT
@@ -302,9 +302,9 @@ Janet 3 Food 18.00
Janet 3 Beer 18.00
CREATE TABLE t3 (
WHO CHAR(12),
-WEEK INT(2) FIELD_FORMAT='$.WEEK[1].NUMBER',
-WHAT CHAR(32) FIELD_FORMAT='$.WEEK[1].EXPENSE[*].WHAT',
-AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.WEEK[1].EXPENSE[*].AMOUNT')
+WEEK INT(2) JPATH='$.WEEK[1].NUMBER',
+WHAT CHAR(32) JPATH='$.WEEK[1].EXPENSE[*].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.WEEK[1].EXPENSE[*].AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.json';
SELECT * FROM t3;
WHO WEEK WHAT AMOUNT
@@ -318,9 +318,9 @@ Beth 4 Beer 15.00
Janet 4 Car 17.00
CREATE TABLE t4 (
WHO CHAR(12),
-WEEK INT(2) FIELD_FORMAT='$.WEEK[2].NUMBER',
-WHAT CHAR(32) FIELD_FORMAT='$.WEEK[2].EXPENSE[*].WHAT',
-AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.WEEK[2].EXPENSE[*].AMOUNT')
+WEEK INT(2) JPATH='$.WEEK[2].NUMBER',
+WHAT CHAR(32) JPATH='$.WEEK[2].EXPENSE[*].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.WEEK[2].EXPENSE[*].AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.json';
SELECT * FROM t4;
WHO WEEK WHAT AMOUNT
@@ -374,8 +374,8 @@ DROP TABLE t1, t2, t3, t4;
CREATE TABLE t2 (
WHO CHAR(12),
WEEK INT(2),
-WHAT CHAR(32) FIELD_FORMAT='$.EXPENSE[*].WHAT',
-AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.EXPENSE.[*].AMOUNT')
+WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='mulexp3.json';
SELECT * FROM t2;
WHO WEEK WHAT AMOUNT
@@ -390,8 +390,8 @@ Janet 3 Beer 18.00
CREATE TABLE t3 (
WHO CHAR(12),
WEEK INT(2),
-WHAT CHAR(32) FIELD_FORMAT='$.EXPENSE[*].WHAT',
-AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.EXPENSE.[*].AMOUNT')
+WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='mulexp4.json';
SELECT * FROM t3;
WHO WEEK WHAT AMOUNT
@@ -406,8 +406,8 @@ Janet 4 Car 17.00
CREATE TABLE t4 (
WHO CHAR(12),
WEEK INT(2),
-WHAT CHAR(32) FIELD_FORMAT='$.EXPENSE[*].WHAT',
-AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.EXPENSE.[*].AMOUNT')
+WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='mulexp5.json';
SELECT * FROM t4;
WHO WEEK WHAT AMOUNT
@@ -425,8 +425,8 @@ Janet 5 Food 12.00
CREATE TABLE t1 (
WHO CHAR(12),
WEEK INT(2),
-WHAT CHAR(32) FIELD_FORMAT='$.EXPENSE[*].WHAT',
-AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.EXPENSE.[*].AMOUNT')
+WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='mulexp*.json' MULTIPLE=1;
SELECT * FROM t1 ORDER BY WHO, WEEK, WHAT, AMOUNT;
WHO WEEK WHAT AMOUNT
@@ -461,8 +461,8 @@ DROP TABLE t1;
CREATE TABLE t1 (
WHO CHAR(12),
WEEK INT(2),
-WHAT CHAR(32) FIELD_FORMAT='$.EXPENSE[*].WHAT',
-AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.EXPENSE.[*].AMOUNT')
+WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='mulexp%s.json';
ALTER TABLE t1
PARTITION BY LIST COLUMNS(WEEK) (
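#
# Illustrative sketch (not part of the patch above): the json.result hunks
# show the column option now printed as JPATH where it previously read
# FIELD_FORMAT. The hypothetical table below (name biblio_sketch is made up)
# simply restates the biblio.json declarations in the new spelling.
#
CREATE TABLE biblio_sketch (
ISBN CHAR(15),
AuthorLN CHAR(128) JPATH='$.AUTHOR[*].LASTNAME',
Title CHAR(32) JPATH='$.TITLE',
Year INT(4) JPATH='$.DATEPUB')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='biblio.json';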
diff --git a/storage/connect/mysql-test/connect/r/json_java_2.result b/storage/connect/mysql-test/connect/r/json_java_2.result
index 47fc4abbd28..e0b08889f40 100644
--- a/storage/connect/mysql-test/connect/r/json_java_2.result
+++ b/storage/connect/mysql-test/connect/r/json_java_2.result
@@ -1,8 +1,9 @@
set connect_enable_mongo=1;
+set connect_json_all_path=0;
#
# Test the MONGO table type
#
-CREATE TABLE t1 (Document varchar(1024) field_format='*')
+CREATE TABLE t1 (Document varchar(1024) JPATH='*')
ENGINE=CONNECT TABLE_TYPE=JSON TABNAME=restaurants CONNECTION='mongodb://localhost:27017' LRECL=4096
OPTION_LIST='Driver=Java,Version=2' DATA_CHARSET=utf8;
SELECT * from t1 limit 3;
@@ -15,7 +16,7 @@ DROP TABLE t1;
# Test catfunc
#
CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JSON TABNAME=restaurants CATFUNC=columns
-OPTION_LIST='Level=1,Driver=Java,Version=2' DATA_CHARSET=utf8 CONNECTION='mongodb://localhost:27017' LRECL=4096;
+OPTION_LIST='Depth=1,Driver=Java,Version=2' DATA_CHARSET=utf8 CONNECTION='mongodb://localhost:27017' LRECL=4096;
SELECT * from t1;
Column_Name Data_Type Type_Name Column_Size Buffer_Length Decimal_Digits Nullable Jpath
_id 1 CHAR 24 24 0 0 _id
@@ -27,7 +28,7 @@ borough 1 CHAR 13 13 0 0
cuisine 1 CHAR 64 64 0 0
grades_date 1 CHAR 1024 1024 0 1 grades.0.date
grades_grade 1 CHAR 14 14 0 1 grades.0.grade
-grades_score 5 BIGINT 2 2 0 1 grades.0.score
+grades_score 7 INTEGER 2 2 0 1 grades.0.score
name 1 CHAR 98 98 0 0
restaurant_id 1 CHAR 8 8 0 0
DROP TABLE t1;
@@ -60,7 +61,7 @@ DROP TABLE t1;
# Test discovery
#
CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JSON TABNAME=restaurants
-OPTION_LIST='Level=1,Driver=Java,Version=2' CONNECTION='mongodb://localhost:27017' LRECL=4096 DATA_CHARSET=utf8;
+OPTION_LIST='Depth=1,Driver=Java,Version=2' CONNECTION='mongodb://localhost:27017' LRECL=4096 DATA_CHARSET=utf8;
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
@@ -73,17 +74,17 @@ t1 CREATE TABLE `t1` (
`cuisine` char(64) NOT NULL,
`grades_date` varchar(1024) DEFAULT NULL `JPATH`='grades.0.date',
`grades_grade` char(14) DEFAULT NULL `JPATH`='grades.0.grade',
- `grades_score` bigint(2) DEFAULT NULL `JPATH`='grades.0.score',
+ `grades_score` int(2) DEFAULT NULL `JPATH`='grades.0.score',
`name` char(98) NOT NULL,
`restaurant_id` char(8) NOT NULL
-) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='mongodb://localhost:27017' `TABLE_TYPE`='JSON' `TABNAME`='restaurants' `OPTION_LIST`='Level=1,Driver=Java,Version=2' `DATA_CHARSET`='utf8' `LRECL`=4096
+) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='mongodb://localhost:27017' `TABLE_TYPE`='JSON' `TABNAME`='restaurants' `OPTION_LIST`='Depth=1,Driver=Java,Version=2' `DATA_CHARSET`='utf8' `LRECL`=4096
SELECT * FROM t1 LIMIT 5;
_id address_building address_coord address_street address_zipcode borough cuisine grades_date grades_grade grades_score name restaurant_id
-58ada47de5a51ddfcd5ed51c 1007 -73.856077 Morris Park Ave 10462 Bronx Bakery 2014-03-03T00:00:00.000Z A 2 Morris Park Bake Shop 30075445
-58ada47de5a51ddfcd5ed51d 469 -73.961704 Flatbush Avenue 11225 Brooklyn Hamburgers 2014-12-30T00:00:00.000Z A 8 Wendy'S 30112340
-58ada47de5a51ddfcd5ed51e 351 -73.98513559999999 West 57 Street 10019 Manhattan Irish 2014-09-06T00:00:00.000Z A 2 Dj Reynolds Pub And Restaurant 30191841
-58ada47de5a51ddfcd5ed51f 2780 -73.98241999999999 Stillwell Avenue 11224 Brooklyn American 2014-06-10T00:00:00.000Z A 5 Riviera Caterer 40356018
-58ada47de5a51ddfcd5ed520 97-22 -73.8601152 63 Road 11374 Queens Jewish/Kosher 2014-11-24T00:00:00.000Z Z 20 Tov Kosher Kitchen 40356068
+58ada47de5a51ddfcd5ed51c 1007 -73.856077, 40.848447 Morris Park Ave 10462 Bronx Bakery 2014-03-03T00:00:00.000Z A 2 Morris Park Bake Shop 30075445
+58ada47de5a51ddfcd5ed51d 469 -73.961704, 40.662942 Flatbush Avenue 11225 Brooklyn Hamburgers 2014-12-30T00:00:00.000Z A 8 Wendy'S 30112340
+58ada47de5a51ddfcd5ed51e 351 -73.98513559999999, 40.7676919 West 57 Street 10019 Manhattan Irish 2014-09-06T00:00:00.000Z A 2 Dj Reynolds Pub And Restaurant 30191841
+58ada47de5a51ddfcd5ed51f 2780 -73.98241999999999, 40.579505 Stillwell Avenue 11224 Brooklyn American 2014-06-10T00:00:00.000Z A 5 Riviera Caterer 40356018
+58ada47de5a51ddfcd5ed520 97-22 -73.8601152, 40.7311739 63 Road 11374 Queens Jewish/Kosher 2014-11-24T00:00:00.000Z Z 20 Tov Kosher Kitchen 40356068
DROP TABLE t1;
#
# Dropping a column
@@ -92,16 +93,16 @@ CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JSON TABNAME=restaurants DATA_CHARSET=
COLIST='{"grades":0}' OPTION_LIST='Driver=Java,Version=2,level=0' CONNECTION='mongodb://localhost:27017' LRECL=4096;
SELECT * FROM t1 LIMIT 10;
_id address borough cuisine name restaurant_id
-58ada47de5a51ddfcd5ed51c 1007 -73.856077 40.848447 Morris Park Ave 10462 Bronx Bakery Morris Park Bake Shop 30075445
-58ada47de5a51ddfcd5ed51d 469 -73.961704 40.662942 Flatbush Avenue 11225 Brooklyn Hamburgers Wendy'S 30112340
-58ada47de5a51ddfcd5ed51e 351 -73.98513559999999 40.7676919 West 57 Street 10019 Manhattan Irish Dj Reynolds Pub And Restaurant 30191841
-58ada47de5a51ddfcd5ed51f 2780 -73.98241999999999 40.579505 Stillwell Avenue 11224 Brooklyn American Riviera Caterer 40356018
-58ada47de5a51ddfcd5ed520 97-22 -73.8601152 40.7311739 63 Road 11374 Queens Jewish/Kosher Tov Kosher Kitchen 40356068
-58ada47de5a51ddfcd5ed521 8825 -73.8803827 40.7643124 Astoria Boulevard 11369 Queens American Brunos On The Boulevard 40356151
-58ada47de5a51ddfcd5ed522 2206 -74.1377286 40.6119572 Victory Boulevard 10314 Staten Island Jewish/Kosher Kosher Island 40356442
-58ada47de5a51ddfcd5ed523 7114 -73.9068506 40.6199034 Avenue U 11234 Brooklyn Delicatessen Wilken'S Fine Food 40356483
-58ada47de5a51ddfcd5ed524 6409 -74.00528899999999 40.628886 11 Avenue 11219 Brooklyn American Regina Caterers 40356649
-58ada47de5a51ddfcd5ed525 1839 -73.9482609 40.6408271 Nostrand Avenue 11226 Brooklyn Ice Cream, Gelato, Yogurt, Ices Taste The Tropics Ice Cream 40356731
+58ada47de5a51ddfcd5ed51c 1007 (-73.856077, 40.848447) Morris Park Ave 10462 Bronx Bakery Morris Park Bake Shop 30075445
+58ada47de5a51ddfcd5ed51d 469 (-73.961704, 40.662942) Flatbush Avenue 11225 Brooklyn Hamburgers Wendy'S 30112340
+58ada47de5a51ddfcd5ed51e 351 (-73.98513559999999, 40.7676919) West 57 Street 10019 Manhattan Irish Dj Reynolds Pub And Restaurant 30191841
+58ada47de5a51ddfcd5ed51f 2780 (-73.98241999999999, 40.579505) Stillwell Avenue 11224 Brooklyn American Riviera Caterer 40356018
+58ada47de5a51ddfcd5ed520 97-22 (-73.8601152, 40.7311739) 63 Road 11374 Queens Jewish/Kosher Tov Kosher Kitchen 40356068
+58ada47de5a51ddfcd5ed521 8825 (-73.8803827, 40.7643124) Astoria Boulevard 11369 Queens American Brunos On The Boulevard 40356151
+58ada47de5a51ddfcd5ed522 2206 (-74.1377286, 40.6119572) Victory Boulevard 10314 Staten Island Jewish/Kosher Kosher Island 40356442
+58ada47de5a51ddfcd5ed523 7114 (-73.9068506, 40.6199034) Avenue U 11234 Brooklyn Delicatessen Wilken'S Fine Food 40356483
+58ada47de5a51ddfcd5ed524 6409 (-74.00528899999999, 40.628886) 11 Avenue 11219 Brooklyn American Regina Caterers 40356649
+58ada47de5a51ddfcd5ed525 1839 (-73.9482609, 40.6408271) Nostrand Avenue 11226 Brooklyn Ice Cream, Gelato, Yogurt, Ices Taste The Tropics Ice Cream 40356731
DROP TABLE t1;
#
# Specifying Jpath
@@ -111,12 +112,12 @@ _id VARCHAR(24) NOT NULL,
name VARCHAR(64) NOT NULL,
cuisine CHAR(200) NOT NULL,
borough CHAR(16) NOT NULL,
-street VARCHAR(65) FIELD_FORMAT='address.street',
-building CHAR(16) FIELD_FORMAT='address.building',
-zipcode CHAR(5) FIELD_FORMAT='address.zipcode',
-grade CHAR(1) FIELD_FORMAT='grades.0.grade',
-score INT(4) NOT NULL FIELD_FORMAT='grades.0.score',
-`date` DATE FIELD_FORMAT='grades.0.date',
+street VARCHAR(65) JPATH='address.street',
+building CHAR(16) JPATH='address.building',
+zipcode CHAR(5) JPATH='address.zipcode',
+grade CHAR(1) JPATH='grades.0.grade',
+score INT(4) NOT NULL JPATH='grades.0.score',
+`date` DATE JPATH='grades.0.date',
restaurant_id VARCHAR(255) NOT NULL)
ENGINE=CONNECT TABLE_TYPE=JSON TABNAME='restaurants' DATA_CHARSET=utf8
OPTION_LIST='Driver=Java,Version=2' CONNECTION='mongodb://localhost:27017' LRECL=4096;
@@ -259,7 +260,7 @@ t1 CREATE TABLE `t1` (
`borough` char(13) NOT NULL,
`grades_date` char(24) DEFAULT NULL `JPATH`='grades.0.date',
`grades_grade` char(14) DEFAULT NULL `JPATH`='grades.0.grade',
- `grades_score` bigint(2) DEFAULT NULL `JPATH`='grades.0.score',
+ `grades_score` int(2) DEFAULT NULL `JPATH`='grades.0.score',
`name` char(98) NOT NULL,
`restaurant_id` char(8) NOT NULL
) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='mongodb://localhost:27017' `TABLE_TYPE`='JSON' `TABNAME`='restaurants' `COLIST`='{"cuisine":0}' `FILTER`='{"cuisine":"French","borough":{"$ne":"Manhattan"}}' `OPTION_LIST`='Driver=Java,level=2,version=2' `LRECL`=4096
@@ -305,8 +306,8 @@ false
CREATE TABLE t1 (
_id char(5) NOT NULL,
city char(16) NOT NULL,
-loc_0 double(12,6) NOT NULL `FIELD_FORMAT`='loc.0',
-loc_1 char(12) NOT NULL `FIELD_FORMAT`='loc.1',
+loc_0 double(12,6) NOT NULL `JPATH`='loc.0',
+loc_1 char(12) NOT NULL `JPATH`='loc.1',
pop int(11) NOT NULL,
state char(2) NOT NULL)
ENGINE=CONNECT CONNECTION='mongodb://localhost:27017' TABLE_TYPE=JSON TABNAME='cities'
@@ -344,11 +345,11 @@ true
CREATE TABLE t1 (
_id int(4) NOT NULL,
item CHAR(8) NOT NULL,
-prices_0 INT(6) FIELD_FORMAT='prices.0',
-prices_1 INT(6) FIELD_FORMAT='prices.1',
-prices_2 INT(6) FIELD_FORMAT='prices.2',
-prices_3 INT(6) FIELD_FORMAT='prices.3',
-prices_4 INT(6) FIELD_FORMAT='prices.4')
+prices_0 INT(6) JPATH='prices.0',
+prices_1 INT(6) JPATH='prices.1',
+prices_2 INT(6) JPATH='prices.2',
+prices_3 INT(6) JPATH='prices.3',
+prices_4 INT(6) JPATH='prices.4')
ENGINE=CONNECT TABLE_TYPE=JSON TABNAME='testcoll' DATA_CHARSET=utf8
OPTION_LIST='Driver=Java,Version=2' CONNECTION='mongodb://localhost:27017' LRECL=4096;
INSERT INTO t1 VALUES
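#
# Illustrative sketch (not part of the patch above): the MongoDB-backed JSON
# results now spell the nesting option Depth where they previously spelled it
# Level in OPTION_LIST. The hypothetical table name t_disc is made up; the
# options are copied from the discovery examples in the hunks above.
#
CREATE TABLE t_disc ENGINE=CONNECT TABLE_TYPE=JSON TABNAME=restaurants
OPTION_LIST='Depth=1,Driver=Java,Version=2' DATA_CHARSET=utf8
CONNECTION='mongodb://localhost:27017' LRECL=4096;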
diff --git a/storage/connect/mysql-test/connect/r/json_java_3.result b/storage/connect/mysql-test/connect/r/json_java_3.result
index 720c82cd7f9..b9ba919507d 100644
--- a/storage/connect/mysql-test/connect/r/json_java_3.result
+++ b/storage/connect/mysql-test/connect/r/json_java_3.result
@@ -1,8 +1,9 @@
set connect_enable_mongo=1;
+set connect_json_all_path=0;
#
# Test the MONGO table type
#
-CREATE TABLE t1 (Document varchar(1024) field_format='*')
+CREATE TABLE t1 (Document varchar(1024) JPATH='*')
ENGINE=CONNECT TABLE_TYPE=JSON TABNAME=restaurants CONNECTION='mongodb://localhost:27017' LRECL=4096
OPTION_LIST='Driver=Java,Version=3' DATA_CHARSET=utf8;
SELECT * from t1 limit 3;
@@ -15,7 +16,7 @@ DROP TABLE t1;
# Test catfunc
#
CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JSON TABNAME=restaurants CATFUNC=columns
-OPTION_LIST='Level=1,Driver=Java,Version=3' DATA_CHARSET=utf8 CONNECTION='mongodb://localhost:27017' LRECL=4096;
+OPTION_LIST='Depth=1,Driver=Java,Version=3' DATA_CHARSET=utf8 CONNECTION='mongodb://localhost:27017' LRECL=4096;
SELECT * from t1;
Column_Name Data_Type Type_Name Column_Size Buffer_Length Decimal_Digits Nullable Jpath
_id 1 CHAR 24 24 0 0 _id
@@ -27,7 +28,7 @@ borough 1 CHAR 13 13 0 0
cuisine 1 CHAR 64 64 0 0
grades_date 1 CHAR 1024 1024 0 1 grades.0.date
grades_grade 1 CHAR 14 14 0 1 grades.0.grade
-grades_score 5 BIGINT 2 2 0 1 grades.0.score
+grades_score 7 INTEGER 2 2 0 1 grades.0.score
name 1 CHAR 98 98 0 0
restaurant_id 1 CHAR 8 8 0 0
DROP TABLE t1;
@@ -60,7 +61,7 @@ DROP TABLE t1;
# Test discovery
#
CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JSON TABNAME=restaurants
-OPTION_LIST='Level=1,Driver=Java,Version=3' CONNECTION='mongodb://localhost:27017' LRECL=4096 DATA_CHARSET=utf8;
+OPTION_LIST='Depth=1,Driver=Java,Version=3' CONNECTION='mongodb://localhost:27017' LRECL=4096 DATA_CHARSET=utf8;
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
@@ -73,17 +74,17 @@ t1 CREATE TABLE `t1` (
`cuisine` char(64) NOT NULL,
`grades_date` varchar(1024) DEFAULT NULL `JPATH`='grades.0.date',
`grades_grade` char(14) DEFAULT NULL `JPATH`='grades.0.grade',
- `grades_score` bigint(2) DEFAULT NULL `JPATH`='grades.0.score',
+ `grades_score` int(2) DEFAULT NULL `JPATH`='grades.0.score',
`name` char(98) NOT NULL,
`restaurant_id` char(8) NOT NULL
-) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='mongodb://localhost:27017' `TABLE_TYPE`='JSON' `TABNAME`='restaurants' `OPTION_LIST`='Level=1,Driver=Java,Version=3' `DATA_CHARSET`='utf8' `LRECL`=4096
+) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='mongodb://localhost:27017' `TABLE_TYPE`='JSON' `TABNAME`='restaurants' `OPTION_LIST`='Depth=1,Driver=Java,Version=3' `DATA_CHARSET`='utf8' `LRECL`=4096
SELECT * FROM t1 LIMIT 5;
_id address_building address_coord address_street address_zipcode borough cuisine grades_date grades_grade grades_score name restaurant_id
-58ada47de5a51ddfcd5ed51c 1007 -73.856077 Morris Park Ave 10462 Bronx Bakery 1393804800 A 2 Morris Park Bake Shop 30075445
-58ada47de5a51ddfcd5ed51d 469 -73.961704 Flatbush Avenue 11225 Brooklyn Hamburgers 1419897600 A 8 Wendy'S 30112340
-58ada47de5a51ddfcd5ed51e 351 -73.98513559999999 West 57 Street 10019 Manhattan Irish 1409961600 A 2 Dj Reynolds Pub And Restaurant 30191841
-58ada47de5a51ddfcd5ed51f 2780 -73.98241999999999 Stillwell Avenue 11224 Brooklyn American 1402358400 A 5 Riviera Caterer 40356018
-58ada47de5a51ddfcd5ed520 97-22 -73.8601152 63 Road 11374 Queens Jewish/Kosher 1416787200 Z 20 Tov Kosher Kitchen 40356068
+58ada47de5a51ddfcd5ed51c 1007 -73.856077, 40.848447 Morris Park Ave 10462 Bronx Bakery 1393804800 A 2 Morris Park Bake Shop 30075445
+58ada47de5a51ddfcd5ed51d 469 -73.961704, 40.662942 Flatbush Avenue 11225 Brooklyn Hamburgers 1419897600 A 8 Wendy'S 30112340
+58ada47de5a51ddfcd5ed51e 351 -73.98513559999999, 40.7676919 West 57 Street 10019 Manhattan Irish 1409961600 A 2 Dj Reynolds Pub And Restaurant 30191841
+58ada47de5a51ddfcd5ed51f 2780 -73.98241999999999, 40.579505 Stillwell Avenue 11224 Brooklyn American 1402358400 A 5 Riviera Caterer 40356018
+58ada47de5a51ddfcd5ed520 97-22 -73.8601152, 40.7311739 63 Road 11374 Queens Jewish/Kosher 1416787200 Z 20 Tov Kosher Kitchen 40356068
DROP TABLE t1;
#
# Dropping a column
@@ -92,16 +93,16 @@ CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JSON TABNAME=restaurants DATA_CHARSET=
COLIST='{"grades":0}' OPTION_LIST='Driver=Java,Version=3,level=0' CONNECTION='mongodb://localhost:27017' LRECL=4096;
SELECT * FROM t1 LIMIT 10;
_id address borough cuisine name restaurant_id
-58ada47de5a51ddfcd5ed51c 1007 -73.856077 40.848447 Morris Park Ave 10462 Bronx Bakery Morris Park Bake Shop 30075445
-58ada47de5a51ddfcd5ed51d 469 -73.961704 40.662942 Flatbush Avenue 11225 Brooklyn Hamburgers Wendy'S 30112340
-58ada47de5a51ddfcd5ed51e 351 -73.98513559999999 40.7676919 West 57 Street 10019 Manhattan Irish Dj Reynolds Pub And Restaurant 30191841
-58ada47de5a51ddfcd5ed51f 2780 -73.98241999999999 40.579505 Stillwell Avenue 11224 Brooklyn American Riviera Caterer 40356018
-58ada47de5a51ddfcd5ed520 97-22 -73.8601152 40.7311739 63 Road 11374 Queens Jewish/Kosher Tov Kosher Kitchen 40356068
-58ada47de5a51ddfcd5ed521 8825 -73.8803827 40.7643124 Astoria Boulevard 11369 Queens American Brunos On The Boulevard 40356151
-58ada47de5a51ddfcd5ed522 2206 -74.1377286 40.6119572 Victory Boulevard 10314 Staten Island Jewish/Kosher Kosher Island 40356442
-58ada47de5a51ddfcd5ed523 7114 -73.9068506 40.6199034 Avenue U 11234 Brooklyn Delicatessen Wilken'S Fine Food 40356483
-58ada47de5a51ddfcd5ed524 6409 -74.00528899999999 40.628886 11 Avenue 11219 Brooklyn American Regina Caterers 40356649
-58ada47de5a51ddfcd5ed525 1839 -73.9482609 40.6408271 Nostrand Avenue 11226 Brooklyn Ice Cream, Gelato, Yogurt, Ices Taste The Tropics Ice Cream 40356731
+58ada47de5a51ddfcd5ed51c 1007 (-73.856077, 40.848447) Morris Park Ave 10462 Bronx Bakery Morris Park Bake Shop 30075445
+58ada47de5a51ddfcd5ed51d 469 (-73.961704, 40.662942) Flatbush Avenue 11225 Brooklyn Hamburgers Wendy'S 30112340
+58ada47de5a51ddfcd5ed51e 351 (-73.98513559999999, 40.7676919) West 57 Street 10019 Manhattan Irish Dj Reynolds Pub And Restaurant 30191841
+58ada47de5a51ddfcd5ed51f 2780 (-73.98241999999999, 40.579505) Stillwell Avenue 11224 Brooklyn American Riviera Caterer 40356018
+58ada47de5a51ddfcd5ed520 97-22 (-73.8601152, 40.7311739) 63 Road 11374 Queens Jewish/Kosher Tov Kosher Kitchen 40356068
+58ada47de5a51ddfcd5ed521 8825 (-73.8803827, 40.7643124) Astoria Boulevard 11369 Queens American Brunos On The Boulevard 40356151
+58ada47de5a51ddfcd5ed522 2206 (-74.1377286, 40.6119572) Victory Boulevard 10314 Staten Island Jewish/Kosher Kosher Island 40356442
+58ada47de5a51ddfcd5ed523 7114 (-73.9068506, 40.6199034) Avenue U 11234 Brooklyn Delicatessen Wilken'S Fine Food 40356483
+58ada47de5a51ddfcd5ed524 6409 (-74.00528899999999, 40.628886) 11 Avenue 11219 Brooklyn American Regina Caterers 40356649
+58ada47de5a51ddfcd5ed525 1839 (-73.9482609, 40.6408271) Nostrand Avenue 11226 Brooklyn Ice Cream, Gelato, Yogurt, Ices Taste The Tropics Ice Cream 40356731
DROP TABLE t1;
#
# Specifying Jpath
@@ -111,12 +112,12 @@ _id VARCHAR(24) NOT NULL,
name VARCHAR(64) NOT NULL,
cuisine CHAR(200) NOT NULL,
borough CHAR(16) NOT NULL,
-street VARCHAR(65) FIELD_FORMAT='address.street',
-building CHAR(16) FIELD_FORMAT='address.building',
-zipcode CHAR(5) FIELD_FORMAT='address.zipcode',
-grade CHAR(1) FIELD_FORMAT='grades.0.grade',
-score INT(4) NOT NULL FIELD_FORMAT='grades.0.score',
-`date` DATE FIELD_FORMAT='grades.0.date',
+street VARCHAR(65) JPATH='address.street',
+building CHAR(16) JPATH='address.building',
+zipcode CHAR(5) JPATH='address.zipcode',
+grade CHAR(1) JPATH='grades.0.grade',
+score INT(4) NOT NULL JPATH='grades.0.score',
+`date` DATE JPATH='grades.0.date',
restaurant_id VARCHAR(255) NOT NULL)
ENGINE=CONNECT TABLE_TYPE=JSON TABNAME='restaurants' DATA_CHARSET=utf8
OPTION_LIST='Driver=Java,Version=3' CONNECTION='mongodb://localhost:27017' LRECL=4096;
@@ -259,7 +260,7 @@ t1 CREATE TABLE `t1` (
`borough` char(13) NOT NULL,
`grades_date` bigint(13) DEFAULT NULL `JPATH`='grades.0.date',
`grades_grade` char(14) DEFAULT NULL `JPATH`='grades.0.grade',
- `grades_score` bigint(2) DEFAULT NULL `JPATH`='grades.0.score',
+ `grades_score` int(2) DEFAULT NULL `JPATH`='grades.0.score',
`name` char(98) NOT NULL,
`restaurant_id` char(8) NOT NULL
) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='mongodb://localhost:27017' `TABLE_TYPE`='JSON' `TABNAME`='restaurants' `COLIST`='{"cuisine":0}' `FILTER`='{"cuisine":"French","borough":{"$ne":"Manhattan"}}' `OPTION_LIST`='Driver=Java,level=2,version=3' `LRECL`=4096
@@ -305,8 +306,8 @@ false
CREATE TABLE t1 (
_id char(5) NOT NULL,
city char(16) NOT NULL,
-loc_0 double(12,6) NOT NULL `FIELD_FORMAT`='loc.0',
-loc_1 char(12) NOT NULL `FIELD_FORMAT`='loc.1',
+loc_0 double(12,6) NOT NULL `JPATH`='loc.0',
+loc_1 char(12) NOT NULL `JPATH`='loc.1',
pop int(11) NOT NULL,
state char(2) NOT NULL)
ENGINE=CONNECT CONNECTION='mongodb://localhost:27017' TABLE_TYPE=JSON TABNAME='cities'
@@ -344,11 +345,11 @@ true
CREATE TABLE t1 (
_id int(4) NOT NULL,
item CHAR(8) NOT NULL,
-prices_0 INT(6) FIELD_FORMAT='prices.0',
-prices_1 INT(6) FIELD_FORMAT='prices.1',
-prices_2 INT(6) FIELD_FORMAT='prices.2',
-prices_3 INT(6) FIELD_FORMAT='prices.3',
-prices_4 INT(6) FIELD_FORMAT='prices.4')
+prices_0 INT(6) JPATH='prices.0',
+prices_1 INT(6) JPATH='prices.1',
+prices_2 INT(6) JPATH='prices.2',
+prices_3 INT(6) JPATH='prices.3',
+prices_4 INT(6) JPATH='prices.4')
ENGINE=CONNECT TABLE_TYPE=JSON TABNAME='testcoll' DATA_CHARSET=utf8
OPTION_LIST='Driver=Java,Version=3' CONNECTION='mongodb://localhost:27017' LRECL=4096;
INSERT INTO t1 VALUES
diff --git a/storage/connect/mysql-test/connect/r/json_mongo_c.result b/storage/connect/mysql-test/connect/r/json_mongo_c.result
index f9bfc01763e..482ccc85b57 100644
--- a/storage/connect/mysql-test/connect/r/json_mongo_c.result
+++ b/storage/connect/mysql-test/connect/r/json_mongo_c.result
@@ -1,8 +1,9 @@
set connect_enable_mongo=1;
+set connect_json_all_path=0;
#
# Test the MONGO table type
#
-CREATE TABLE t1 (Document varchar(1024) field_format='*')
+CREATE TABLE t1 (Document varchar(1024) JPATH='*')
ENGINE=CONNECT TABLE_TYPE=JSON TABNAME=restaurants CONNECTION='mongodb://localhost:27017' LRECL=1024
OPTION_LIST='Driver=C,Version=0' DATA_CHARSET=utf8;
SELECT * from t1 limit 3;
@@ -15,7 +16,7 @@ DROP TABLE t1;
# Test catfunc
#
CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JSON TABNAME=restaurants CATFUNC=columns
-OPTION_LIST='Level=1,Driver=C,Version=0' DATA_CHARSET=utf8 CONNECTION='mongodb://localhost:27017' LRECL=1024;
+OPTION_LIST='Depth=1,Driver=C,Version=0' DATA_CHARSET=utf8 CONNECTION='mongodb://localhost:27017' LRECL=1024;
SELECT * from t1;
Column_Name Data_Type Type_Name Column_Size Buffer_Length Decimal_Digits Nullable Jpath
_id 1 CHAR 24 24 0 0 _id
@@ -27,7 +28,7 @@ borough 1 CHAR 13 13 0 0
cuisine 1 CHAR 64 64 0 0
grades_date 1 CHAR 1024 1024 0 1 grades.0.date
grades_grade 1 CHAR 14 14 0 1 grades.0.grade
-grades_score 5 BIGINT 2 2 0 1 grades.0.score
+grades_score 7 INTEGER 2 2 0 1 grades.0.score
name 1 CHAR 98 98 0 0
restaurant_id 1 CHAR 8 8 0 0
DROP TABLE t1;
@@ -60,7 +61,7 @@ DROP TABLE t1;
# Test discovery
#
CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JSON TABNAME=restaurants
-OPTION_LIST='Level=1,Driver=C,Version=0' CONNECTION='mongodb://localhost:27017' LRECL=1024 DATA_CHARSET=utf8;
+OPTION_LIST='Depth=1,Driver=C,Version=0' CONNECTION='mongodb://localhost:27017' LRECL=1024 DATA_CHARSET=utf8;
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
@@ -73,17 +74,17 @@ t1 CREATE TABLE `t1` (
`cuisine` char(64) NOT NULL,
`grades_date` varchar(1024) DEFAULT NULL `JPATH`='grades.0.date',
`grades_grade` char(14) DEFAULT NULL `JPATH`='grades.0.grade',
- `grades_score` bigint(2) DEFAULT NULL `JPATH`='grades.0.score',
+ `grades_score` int(2) DEFAULT NULL `JPATH`='grades.0.score',
`name` char(98) NOT NULL,
`restaurant_id` char(8) NOT NULL
-) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='mongodb://localhost:27017' `TABLE_TYPE`='JSON' `TABNAME`='restaurants' `OPTION_LIST`='Level=1,Driver=C,Version=0' `DATA_CHARSET`='utf8' `LRECL`=1024
+) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='mongodb://localhost:27017' `TABLE_TYPE`='JSON' `TABNAME`='restaurants' `OPTION_LIST`='Depth=1,Driver=C,Version=0' `DATA_CHARSET`='utf8' `LRECL`=1024
SELECT * FROM t1 LIMIT 5;
_id address_building address_coord address_street address_zipcode borough cuisine grades_date grades_grade grades_score name restaurant_id
-58ada47de5a51ddfcd5ed51c 1007 -73.856076999999999089 Morris Park Ave 10462 Bronx Bakery 1393804800 A 2 Morris Park Bake Shop 30075445
-58ada47de5a51ddfcd5ed51d 469 -73.96170399999999745 Flatbush Avenue 11225 Brooklyn Hamburgers 1419897600 A 8 Wendy'S 30112340
-58ada47de5a51ddfcd5ed51e 351 -73.985135599999992451 West 57 Street 10019 Manhattan Irish 1409961600 A 2 Dj Reynolds Pub And Restaurant 30191841
-58ada47de5a51ddfcd5ed51f 2780 -73.982419999999990523 Stillwell Avenue 11224 Brooklyn American 1402358400 A 5 Riviera Caterer 40356018
-58ada47de5a51ddfcd5ed520 97-22 -73.860115199999995639 63 Road 11374 Queens Jewish/Kosher 1416787200 Z 20 Tov Kosher Kitchen 40356068
+58ada47de5a51ddfcd5ed51c 1007 -73.856076999999999089, 40.848447000000000173 Morris Park Ave 10462 Bronx Bakery 1393804800 A 2 Morris Park Bake Shop 30075445
+58ada47de5a51ddfcd5ed51d 469 -73.96170399999999745, 40.66294200000000103 Flatbush Avenue 11225 Brooklyn Hamburgers 1419897600 A 8 Wendy'S 30112340
+58ada47de5a51ddfcd5ed51e 351 -73.985135599999992451, 40.767691900000002647 West 57 Street 10019 Manhattan Irish 1409961600 A 2 Dj Reynolds Pub And Restaurant 30191841
+58ada47de5a51ddfcd5ed51f 2780 -73.982419999999990523, 40.579504999999997494 Stillwell Avenue 11224 Brooklyn American 1402358400 A 5 Riviera Caterer 40356018
+58ada47de5a51ddfcd5ed520 97-22 -73.860115199999995639, 40.731173900000001709 63 Road 11374 Queens Jewish/Kosher 1416787200 Z 20 Tov Kosher Kitchen 40356068
DROP TABLE t1;
#
# Dropping a column
@@ -92,16 +93,16 @@ CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JSON TABNAME=restaurants DATA_CHARSET=
COLIST='{"projection":{"grades":0}}' OPTION_LIST='Driver=C,Version=0,level=0' CONNECTION='mongodb://localhost:27017' LRECL=1024;
SELECT * FROM t1 LIMIT 10;
_id address borough cuisine name restaurant_id
-58ada47de5a51ddfcd5ed51c 1007 -73.856076999999999089 40.848447000000000173 Morris Park Ave 10462 Bronx Bakery Morris Park Bake Shop 30075445
-58ada47de5a51ddfcd5ed51d 469 -73.96170399999999745 40.66294200000000103 Flatbush Avenue 11225 Brooklyn Hamburgers Wendy'S 30112340
-58ada47de5a51ddfcd5ed51e 351 -73.985135599999992451 40.767691900000002647 West 57 Street 10019 Manhattan Irish Dj Reynolds Pub And Restaurant 30191841
-58ada47de5a51ddfcd5ed51f 2780 -73.982419999999990523 40.579504999999997494 Stillwell Avenue 11224 Brooklyn American Riviera Caterer 40356018
-58ada47de5a51ddfcd5ed520 97-22 -73.860115199999995639 40.731173900000001709 63 Road 11374 Queens Jewish/Kosher Tov Kosher Kitchen 40356068
-58ada47de5a51ddfcd5ed521 8825 -73.880382699999998408 40.764312400000001446 Astoria Boulevard 11369 Queens American Brunos On The Boulevard 40356151
-58ada47de5a51ddfcd5ed522 2206 -74.137728600000002643 40.611957199999999091 Victory Boulevard 10314 Staten Island Jewish/Kosher Kosher Island 40356442
-58ada47de5a51ddfcd5ed523 7114 -73.906850599999998508 40.619903399999998328 Avenue U 11234 Brooklyn Delicatessen Wilken'S Fine Food 40356483
-58ada47de5a51ddfcd5ed524 6409 -74.005288999999990551 40.628886000000001388 11 Avenue 11219 Brooklyn American Regina Caterers 40356649
-58ada47de5a51ddfcd5ed525 1839 -73.948260899999993967 40.640827100000002758 Nostrand Avenue 11226 Brooklyn Ice Cream, Gelato, Yogurt, Ices Taste The Tropics Ice Cream 40356731
+58ada47de5a51ddfcd5ed51c 1007 (-73.856076999999999089, 40.848447000000000173) Morris Park Ave 10462 Bronx Bakery Morris Park Bake Shop 30075445
+58ada47de5a51ddfcd5ed51d 469 (-73.96170399999999745, 40.66294200000000103) Flatbush Avenue 11225 Brooklyn Hamburgers Wendy'S 30112340
+58ada47de5a51ddfcd5ed51e 351 (-73.985135599999992451, 40.767691900000002647) West 57 Street 10019 Manhattan Irish Dj Reynolds Pub And Restaurant 30191841
+58ada47de5a51ddfcd5ed51f 2780 (-73.982419999999990523, 40.579504999999997494) Stillwell Avenue 11224 Brooklyn American Riviera Caterer 40356018
+58ada47de5a51ddfcd5ed520 97-22 (-73.860115199999995639, 40.731173900000001709) 63 Road 11374 Queens Jewish/Kosher Tov Kosher Kitchen 40356068
+58ada47de5a51ddfcd5ed521 8825 (-73.880382699999998408, 40.764312400000001446) Astoria Boulevard 11369 Queens American Brunos On The Boulevard 40356151
+58ada47de5a51ddfcd5ed522 2206 (-74.137728600000002643, 40.611957199999999091) Victory Boulevard 10314 Staten Island Jewish/Kosher Kosher Island 40356442
+58ada47de5a51ddfcd5ed523 7114 (-73.906850599999998508, 40.619903399999998328) Avenue U 11234 Brooklyn Delicatessen Wilken'S Fine Food 40356483
+58ada47de5a51ddfcd5ed524 6409 (-74.005288999999990551, 40.628886000000001388) 11 Avenue 11219 Brooklyn American Regina Caterers 40356649
+58ada47de5a51ddfcd5ed525 1839 (-73.948260899999993967, 40.640827100000002758) Nostrand Avenue 11226 Brooklyn Ice Cream, Gelato, Yogurt, Ices Taste The Tropics Ice Cream 40356731
DROP TABLE t1;
#
# Specifying Jpath
@@ -111,12 +112,12 @@ _id VARCHAR(24) NOT NULL,
name VARCHAR(64) NOT NULL,
cuisine CHAR(200) NOT NULL,
borough CHAR(16) NOT NULL,
-street VARCHAR(65) FIELD_FORMAT='address.street',
-building CHAR(16) FIELD_FORMAT='address.building',
-zipcode CHAR(5) FIELD_FORMAT='address.zipcode',
-grade CHAR(1) FIELD_FORMAT='grades.0.grade',
-score INT(4) NOT NULL FIELD_FORMAT='grades.0.score',
-`date` DATE FIELD_FORMAT='grades.0.date',
+street VARCHAR(65) JPATH='address.street',
+building CHAR(16) JPATH='address.building',
+zipcode CHAR(5) JPATH='address.zipcode',
+grade CHAR(1) JPATH='grades.0.grade',
+score INT(4) NOT NULL JPATH='grades.0.score',
+`date` DATE JPATH='grades.0.date',
restaurant_id VARCHAR(255) NOT NULL)
ENGINE=CONNECT TABLE_TYPE=JSON TABNAME='restaurants' DATA_CHARSET=utf8
OPTION_LIST='Driver=C,Version=0' CONNECTION='mongodb://localhost:27017' LRECL=1024;
@@ -259,7 +260,7 @@ t1 CREATE TABLE `t1` (
`borough` char(13) NOT NULL,
`grades_date` bigint(13) DEFAULT NULL `JPATH`='grades.0.date',
`grades_grade` char(14) DEFAULT NULL `JPATH`='grades.0.grade',
- `grades_score` bigint(2) DEFAULT NULL `JPATH`='grades.0.score',
+ `grades_score` int(2) DEFAULT NULL `JPATH`='grades.0.score',
`name` char(98) NOT NULL,
`restaurant_id` char(8) NOT NULL
) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='mongodb://localhost:27017' `TABLE_TYPE`='JSON' `TABNAME`='restaurants' `COLIST`='{"projection":{"cuisine":0}}' `FILTER`='{"cuisine":"French","borough":{"$ne":"Manhattan"}}' `OPTION_LIST`='Driver=C,level=2,version=0' `LRECL`=1024
@@ -305,8 +306,8 @@ false
CREATE TABLE t1 (
_id char(5) NOT NULL,
city char(16) NOT NULL,
-loc_0 double(12,6) NOT NULL `FIELD_FORMAT`='loc.0',
-loc_1 char(12) NOT NULL `FIELD_FORMAT`='loc.1',
+loc_0 double(12,6) NOT NULL `JPATH`='loc.0',
+loc_1 char(12) NOT NULL `JPATH`='loc.1',
pop int(11) NOT NULL,
state char(2) NOT NULL)
ENGINE=CONNECT CONNECTION='mongodb://localhost:27017' TABLE_TYPE=JSON TABNAME='cities'
@@ -344,11 +345,11 @@ true
CREATE TABLE t1 (
_id int(4) NOT NULL,
item CHAR(8) NOT NULL,
-prices_0 INT(6) FIELD_FORMAT='prices.0',
-prices_1 INT(6) FIELD_FORMAT='prices.1',
-prices_2 INT(6) FIELD_FORMAT='prices.2',
-prices_3 INT(6) FIELD_FORMAT='prices.3',
-prices_4 INT(6) FIELD_FORMAT='prices.4')
+prices_0 INT(6) JPATH='prices.0',
+prices_1 INT(6) JPATH='prices.1',
+prices_2 INT(6) JPATH='prices.2',
+prices_3 INT(6) JPATH='prices.3',
+prices_4 INT(6) JPATH='prices.4')
ENGINE=CONNECT TABLE_TYPE=JSON TABNAME='testcoll' DATA_CHARSET=utf8
OPTION_LIST='Driver=C,Version=0' CONNECTION='mongodb://localhost:27017' LRECL=1024;
INSERT INTO t1 VALUES
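
Most of the result changes in the JSON/MongoDB hunks above come from the same rename: the per-column FIELD_FORMAT option becomes JPATH and the discovery option Level becomes Depth. A minimal standalone sketch of the new syntax, assuming a reachable MongoDB server at mongodb://localhost:27017 holding the same restaurants collection these tests use (the table name restaurants_flat is illustrative):

CREATE TABLE restaurants_flat (
  name   CHAR(64) NOT NULL,
  street VARCHAR(65) JPATH='address.street',   -- nested object member
  score  INT(4)      JPATH='grades.0.score'    -- first element of the grades array
) ENGINE=CONNECT TABLE_TYPE=JSON TABNAME=restaurants
  CONNECTION='mongodb://localhost:27017' LRECL=1024
  OPTION_LIST='Driver=C,Version=0' DATA_CHARSET=utf8;
SELECT name, street, score FROM restaurants_flat LIMIT 3;
DROP TABLE restaurants_flat;
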
diff --git a/storage/connect/mysql-test/connect/r/json_udf.result b/storage/connect/mysql-test/connect/r/json_udf.result
index 09544bb1ecb..8315fc3f3bf 100644
--- a/storage/connect/mysql-test/connect/r/json_udf.result
+++ b/storage/connect/mysql-test/connect/r/json_udf.result
@@ -187,11 +187,11 @@ DATEPUB int(4)
) ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='biblio.json';
SELECT Json_Make_Array(AUTHOR, TITLE, DATEPUB) FROM t2;
Json_Make_Array(AUTHOR, TITLE, DATEPUB)
-["Jean-Christophe Bernadac","Construire une application XML",1999]
+[" Jean-Christophe Bernadac, François Knab","Construire une application XML",1999]
["William J. Pardi","XML en Action",1999]
SELECT Json_Make_Object(AUTHOR, TITLE, DATEPUB) FROM t2;
Json_Make_Object(AUTHOR, TITLE, DATEPUB)
-{"AUTHOR":"Jean-Christophe Bernadac","TITLE":"Construire une application XML","DATEPUB":1999}
+{"AUTHOR":" Jean-Christophe Bernadac, François Knab","TITLE":"Construire une application XML","DATEPUB":1999}
{"AUTHOR":"William J. Pardi","TITLE":"XML en Action","DATEPUB":1999}
SELECT Json_Array_Grp(TITLE, DATEPUB) FROM t2;
ERROR HY000: Can't initialize function 'json_array_grp'; This function can only accept 1 argument
@@ -610,7 +610,7 @@ JsonGet_String(Json_File('test/fx.json'), '1.*')
{"_id":6,"type":"car","item":"roadster","mileage":56000,"ratings":[6,9]}
SELECT JsonGet_String(Json_File('test/fx.json'), '1');
JsonGet_String(Json_File('test/fx.json'), '1')
-6 car roadster 56000 6 9
+6 car roadster 56000 (6, 9)
SELECT JsonGet_Int(Json_File('test/fx.json'), '1.mileage') AS Mileage;
Mileage
56000
diff --git a/storage/connect/mysql-test/connect/r/json_udf_bin.result b/storage/connect/mysql-test/connect/r/json_udf_bin.result
index d0819619c33..c20cf7ce632 100644
--- a/storage/connect/mysql-test/connect/r/json_udf_bin.result
+++ b/storage/connect/mysql-test/connect/r/json_udf_bin.result
@@ -87,7 +87,7 @@ Json_Get_Item(Jbin_File('gloss.json'),'$.glossary.GlossDiv')
{"title":"S","GlossList":{"GlossEntry":{"ID":"SGML","SortAs":"SGML","GlossTerm":"Standard Generalized Markup Language","Acronym":"SGML","Abbrev":"ISO 8879:1986","GlossDef":{"para":"A meta-markup language, used to create markup languages such as DocBook.","GlossSeeAlso":["GML","XML"]},"GlossSee":"markup"}}}
SELECT JsonGet_String(Json_File('gloss.json'),'$.glossary.GlossDiv.GlossList.GlossEntry.GlossDef.GlossSeeAlso') lang;
lang
-GML
+GML, XML
SELECT Json_Get_Item(Jbin_File('gloss.json'),'$.glossary.GlossDiv.GlossList.GlossEntry.GlossDef.GlossSeeAlso') "See also";
See also
["GML","XML"]
diff --git a/storage/connect/mysql-test/connect/r/mongo_c.result b/storage/connect/mysql-test/connect/r/mongo_c.result
index 132bb34ce64..8b86ce32943 100644
--- a/storage/connect/mysql-test/connect/r/mongo_c.result
+++ b/storage/connect/mysql-test/connect/r/mongo_c.result
@@ -1,8 +1,9 @@
set connect_enable_mongo=1;
+set connect_json_all_path=0;
#
# Test the MONGO table type
#
-CREATE TABLE t1 (Document varchar(1024) field_format='*')
+CREATE TABLE t1 (Document varchar(1024) JPATH='*')
ENGINE=CONNECT TABLE_TYPE=MONGO TABNAME=restaurants
OPTION_LIST='Driver=C,Version=0' DATA_CHARSET=utf8;
SELECT * from t1 limit 3;
@@ -15,7 +16,7 @@ DROP TABLE t1;
# Test catfunc
#
CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=MONGO TABNAME=restaurants CATFUNC=columns
-OPTION_LIST='Level=1,Driver=C,Version=0' DATA_CHARSET=utf8 ;
+OPTION_LIST='Depth=1,Driver=C,Version=0' DATA_CHARSET=utf8 ;
SELECT * from t1;
Column_Name Data_Type Type_Name Column_Size Buffer_Length Decimal_Digits Nullable Bpath
_id 1 CHAR 24 24 0 0
@@ -58,7 +59,7 @@ DROP TABLE t1;
# Test discovery
#
CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=MONGO TABNAME=restaurants
-OPTION_LIST='Level=1,Driver=C,Version=0' DATA_CHARSET=utf8;
+OPTION_LIST='Depth=1,Driver=C,Version=0' DATA_CHARSET=utf8;
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
@@ -72,7 +73,7 @@ t1 CREATE TABLE `t1` (
`grades_0` varchar(512) DEFAULT NULL `FIELD_FORMAT`='grades.0',
`name` char(98) NOT NULL,
`restaurant_id` char(8) NOT NULL
-) ENGINE=CONNECT DEFAULT CHARSET=latin1 `TABLE_TYPE`='MONGO' `TABNAME`='restaurants' `OPTION_LIST`='Level=1,Driver=C,Version=0' `DATA_CHARSET`='utf8'
+) ENGINE=CONNECT DEFAULT CHARSET=latin1 `TABLE_TYPE`='MONGO' `TABNAME`='restaurants' `OPTION_LIST`='Depth=1,Driver=C,Version=0' `DATA_CHARSET`='utf8'
SELECT * FROM t1 LIMIT 5;
_id address_building address_coord address_street address_zipcode borough cuisine grades_0 name restaurant_id
58ada47de5a51ddfcd5ed51c 1007 Morris Park Ave 10462 Bronx Bakery {"date":{"$date":1393804800000},"grade":"A","score":2} Morris Park Bake Shop 30075445
@@ -107,12 +108,12 @@ _id VARCHAR(24) NOT NULL,
name VARCHAR(64) NOT NULL,
cuisine CHAR(200) NOT NULL,
borough CHAR(16) NOT NULL,
-street VARCHAR(65) FIELD_FORMAT='address.street',
-building CHAR(16) FIELD_FORMAT='address.building',
-zipcode CHAR(5) FIELD_FORMAT='address.zipcode',
-grade CHAR(1) FIELD_FORMAT='grades.0.grade',
-score INT(4) NOT NULL FIELD_FORMAT='grades.0.score',
-`date` DATE FIELD_FORMAT='grades.0.date',
+street VARCHAR(65) JPATH='address.street',
+building CHAR(16) JPATH='address.building',
+zipcode CHAR(5) JPATH='address.zipcode',
+grade CHAR(1) JPATH='grades.0.grade',
+score INT(4) NOT NULL JPATH='grades.0.score',
+`date` DATE JPATH='grades.0.date',
restaurant_id VARCHAR(255) NOT NULL)
ENGINE=CONNECT TABLE_TYPE=MONGO TABNAME='restaurants' DATA_CHARSET=utf8
OPTION_LIST='Driver=C,Version=0' ;
@@ -301,8 +302,8 @@ false
CREATE TABLE t1 (
_id char(5) NOT NULL,
city char(16) NOT NULL,
-loc_0 double(12,6) NOT NULL `FIELD_FORMAT`='loc.0',
-loc_1 char(12) NOT NULL `FIELD_FORMAT`='loc.1',
+loc_0 double(12,6) NOT NULL `JPATH`='loc.0',
+loc_1 char(12) NOT NULL `JPATH`='loc.1',
pop int(11) NOT NULL,
state char(2) NOT NULL)
ENGINE=CONNECT CONNECTION='mongodb://localhost:27017' TABLE_TYPE=MONGO TABNAME='cities'
@@ -340,11 +341,11 @@ true
CREATE TABLE t1 (
_id int(4) NOT NULL,
item CHAR(8) NOT NULL,
-prices_0 INT(6) FIELD_FORMAT='prices.0',
-prices_1 INT(6) FIELD_FORMAT='prices.1',
-prices_2 INT(6) FIELD_FORMAT='prices.2',
-prices_3 INT(6) FIELD_FORMAT='prices.3',
-prices_4 INT(6) FIELD_FORMAT='prices.4')
+prices_0 INT(6) JPATH='prices.0',
+prices_1 INT(6) JPATH='prices.1',
+prices_2 INT(6) JPATH='prices.2',
+prices_3 INT(6) JPATH='prices.3',
+prices_4 INT(6) JPATH='prices.4')
ENGINE=CONNECT TABLE_TYPE=MONGO TABNAME='testcoll' DATA_CHARSET=utf8
OPTION_LIST='Driver=C,Version=0' ;
INSERT INTO t1 VALUES
diff --git a/storage/connect/mysql-test/connect/r/mongo_java_2.result b/storage/connect/mysql-test/connect/r/mongo_java_2.result
index bc186d7137e..cccda2760d6 100644
--- a/storage/connect/mysql-test/connect/r/mongo_java_2.result
+++ b/storage/connect/mysql-test/connect/r/mongo_java_2.result
@@ -1,8 +1,9 @@
set connect_enable_mongo=1;
+set connect_json_all_path=0;
#
# Test the MONGO table type
#
-CREATE TABLE t1 (Document varchar(1024) field_format='*')
+CREATE TABLE t1 (Document varchar(1024) JPATH='*')
ENGINE=CONNECT TABLE_TYPE=MONGO TABNAME=restaurants
OPTION_LIST='Driver=Java,Version=2' DATA_CHARSET=utf8;
SELECT * from t1 limit 3;
@@ -15,7 +16,7 @@ DROP TABLE t1;
# Test catfunc
#
CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=MONGO TABNAME=restaurants CATFUNC=columns
-OPTION_LIST='Level=1,Driver=Java,Version=2' DATA_CHARSET=utf8 ;
+OPTION_LIST='Depth=1,Driver=Java,Version=2' DATA_CHARSET=utf8 ;
SELECT * from t1;
Column_Name Data_Type Type_Name Column_Size Buffer_Length Decimal_Digits Nullable Bpath
_id 1 CHAR 24 24 0 0
@@ -58,7 +59,7 @@ DROP TABLE t1;
# Test discovery
#
CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=MONGO TABNAME=restaurants
-OPTION_LIST='Level=1,Driver=Java,Version=2' DATA_CHARSET=utf8;
+OPTION_LIST='Depth=1,Driver=Java,Version=2' DATA_CHARSET=utf8;
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
@@ -72,7 +73,7 @@ t1 CREATE TABLE `t1` (
`grades_0` char(99) DEFAULT NULL `FIELD_FORMAT`='grades.0',
`name` char(98) NOT NULL,
`restaurant_id` char(8) NOT NULL
-) ENGINE=CONNECT DEFAULT CHARSET=latin1 `TABLE_TYPE`='MONGO' `TABNAME`='restaurants' `OPTION_LIST`='Level=1,Driver=Java,Version=2' `DATA_CHARSET`='utf8'
+) ENGINE=CONNECT DEFAULT CHARSET=latin1 `TABLE_TYPE`='MONGO' `TABNAME`='restaurants' `OPTION_LIST`='Depth=1,Driver=Java,Version=2' `DATA_CHARSET`='utf8'
SELECT * FROM t1 LIMIT 5;
_id address_building address_coord address_street address_zipcode borough cuisine grades_0 name restaurant_id
58ada47de5a51ddfcd5ed51c 1007 [ -73.856077 , 40.848447] Morris Park Ave 10462 Bronx Bakery { "date" : { "$date" : "2014-03-03T00:00:00.000Z"} , "grade" : "A" , "score" : 2} Morris Park Bake Shop 30075445
@@ -107,12 +108,12 @@ _id VARCHAR(24) NOT NULL,
name VARCHAR(64) NOT NULL,
cuisine CHAR(200) NOT NULL,
borough CHAR(16) NOT NULL,
-street VARCHAR(65) FIELD_FORMAT='address.street',
-building CHAR(16) FIELD_FORMAT='address.building',
-zipcode CHAR(5) FIELD_FORMAT='address.zipcode',
-grade CHAR(1) FIELD_FORMAT='grades.0.grade',
-score INT(4) NOT NULL FIELD_FORMAT='grades.0.score',
-`date` DATE FIELD_FORMAT='grades.0.date',
+street VARCHAR(65) JPATH='address.street',
+building CHAR(16) JPATH='address.building',
+zipcode CHAR(5) JPATH='address.zipcode',
+grade CHAR(1) JPATH='grades.0.grade',
+score INT(4) NOT NULL JPATH='grades.0.score',
+`date` DATE JPATH='grades.0.date',
restaurant_id VARCHAR(255) NOT NULL)
ENGINE=CONNECT TABLE_TYPE=MONGO TABNAME='restaurants' DATA_CHARSET=utf8
OPTION_LIST='Driver=Java,Version=2' ;
@@ -301,8 +302,8 @@ false
CREATE TABLE t1 (
_id char(5) NOT NULL,
city char(16) NOT NULL,
-loc_0 double(12,6) NOT NULL `FIELD_FORMAT`='loc.0',
-loc_1 char(12) NOT NULL `FIELD_FORMAT`='loc.1',
+loc_0 double(12,6) NOT NULL `JPATH`='loc.0',
+loc_1 char(12) NOT NULL `JPATH`='loc.1',
pop int(11) NOT NULL,
state char(2) NOT NULL)
ENGINE=CONNECT CONNECTION='mongodb://localhost:27017' TABLE_TYPE=MONGO TABNAME='cities'
@@ -340,11 +341,11 @@ true
CREATE TABLE t1 (
_id int(4) NOT NULL,
item CHAR(8) NOT NULL,
-prices_0 INT(6) FIELD_FORMAT='prices.0',
-prices_1 INT(6) FIELD_FORMAT='prices.1',
-prices_2 INT(6) FIELD_FORMAT='prices.2',
-prices_3 INT(6) FIELD_FORMAT='prices.3',
-prices_4 INT(6) FIELD_FORMAT='prices.4')
+prices_0 INT(6) JPATH='prices.0',
+prices_1 INT(6) JPATH='prices.1',
+prices_2 INT(6) JPATH='prices.2',
+prices_3 INT(6) JPATH='prices.3',
+prices_4 INT(6) JPATH='prices.4')
ENGINE=CONNECT TABLE_TYPE=MONGO TABNAME='testcoll' DATA_CHARSET=utf8
OPTION_LIST='Driver=Java,Version=2' ;
INSERT INTO t1 VALUES
diff --git a/storage/connect/mysql-test/connect/r/mongo_java_3.result b/storage/connect/mysql-test/connect/r/mongo_java_3.result
index 30c696fc9eb..ae39148a156 100644
--- a/storage/connect/mysql-test/connect/r/mongo_java_3.result
+++ b/storage/connect/mysql-test/connect/r/mongo_java_3.result
@@ -1,8 +1,9 @@
set connect_enable_mongo=1;
+set connect_json_all_path=0;
#
# Test the MONGO table type
#
-CREATE TABLE t1 (Document varchar(1024) field_format='*')
+CREATE TABLE t1 (Document varchar(1024) JPATH='*')
ENGINE=CONNECT TABLE_TYPE=MONGO TABNAME=restaurants
OPTION_LIST='Driver=Java,Version=3' DATA_CHARSET=utf8;
SELECT * from t1 limit 3;
@@ -15,7 +16,7 @@ DROP TABLE t1;
# Test catfunc
#
CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=MONGO TABNAME=restaurants CATFUNC=columns
-OPTION_LIST='Level=1,Driver=Java,Version=3' DATA_CHARSET=utf8 ;
+OPTION_LIST='Depth=1,Driver=Java,Version=3' DATA_CHARSET=utf8 ;
SELECT * from t1;
Column_Name Data_Type Type_Name Column_Size Buffer_Length Decimal_Digits Nullable Bpath
_id 1 CHAR 24 24 0 0
@@ -58,7 +59,7 @@ DROP TABLE t1;
# Test discovery
#
CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=MONGO TABNAME=restaurants
-OPTION_LIST='Level=1,Driver=Java,Version=3' DATA_CHARSET=utf8;
+OPTION_LIST='Depth=1,Driver=Java,Version=3' DATA_CHARSET=utf8;
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
@@ -72,7 +73,7 @@ t1 CREATE TABLE `t1` (
`grades_0` char(84) DEFAULT NULL `FIELD_FORMAT`='grades.0',
`name` char(98) NOT NULL,
`restaurant_id` char(8) NOT NULL
-) ENGINE=CONNECT DEFAULT CHARSET=latin1 `TABLE_TYPE`='MONGO' `TABNAME`='restaurants' `OPTION_LIST`='Level=1,Driver=Java,Version=3' `DATA_CHARSET`='utf8'
+) ENGINE=CONNECT DEFAULT CHARSET=latin1 `TABLE_TYPE`='MONGO' `TABNAME`='restaurants' `OPTION_LIST`='Depth=1,Driver=Java,Version=3' `DATA_CHARSET`='utf8'
SELECT * FROM t1 LIMIT 5;
_id address_building address_coord address_street address_zipcode borough cuisine grades_0 name restaurant_id
58ada47de5a51ddfcd5ed51c 1007 [-73.856077, 40.848447] Morris Park Ave 10462 Bronx Bakery { "date" : { "$date" : 1393804800000 }, "grade" : "A", "score" : 2 } Morris Park Bake Shop 30075445
@@ -107,12 +108,12 @@ _id VARCHAR(24) NOT NULL,
name VARCHAR(64) NOT NULL,
cuisine CHAR(200) NOT NULL,
borough CHAR(16) NOT NULL,
-street VARCHAR(65) FIELD_FORMAT='address.street',
-building CHAR(16) FIELD_FORMAT='address.building',
-zipcode CHAR(5) FIELD_FORMAT='address.zipcode',
-grade CHAR(1) FIELD_FORMAT='grades.0.grade',
-score INT(4) NOT NULL FIELD_FORMAT='grades.0.score',
-`date` DATE FIELD_FORMAT='grades.0.date',
+street VARCHAR(65) JPATH='address.street',
+building CHAR(16) JPATH='address.building',
+zipcode CHAR(5) JPATH='address.zipcode',
+grade CHAR(1) JPATH='grades.0.grade',
+score INT(4) NOT NULL JPATH='grades.0.score',
+`date` DATE JPATH='grades.0.date',
restaurant_id VARCHAR(255) NOT NULL)
ENGINE=CONNECT TABLE_TYPE=MONGO TABNAME='restaurants' DATA_CHARSET=utf8
OPTION_LIST='Driver=Java,Version=3' ;
@@ -301,8 +302,8 @@ false
CREATE TABLE t1 (
_id char(5) NOT NULL,
city char(16) NOT NULL,
-loc_0 double(12,6) NOT NULL `FIELD_FORMAT`='loc.0',
-loc_1 char(12) NOT NULL `FIELD_FORMAT`='loc.1',
+loc_0 double(12,6) NOT NULL `JPATH`='loc.0',
+loc_1 char(12) NOT NULL `JPATH`='loc.1',
pop int(11) NOT NULL,
state char(2) NOT NULL)
ENGINE=CONNECT CONNECTION='mongodb://localhost:27017' TABLE_TYPE=MONGO TABNAME='cities'
@@ -340,11 +341,11 @@ true
CREATE TABLE t1 (
_id int(4) NOT NULL,
item CHAR(8) NOT NULL,
-prices_0 INT(6) FIELD_FORMAT='prices.0',
-prices_1 INT(6) FIELD_FORMAT='prices.1',
-prices_2 INT(6) FIELD_FORMAT='prices.2',
-prices_3 INT(6) FIELD_FORMAT='prices.3',
-prices_4 INT(6) FIELD_FORMAT='prices.4')
+prices_0 INT(6) JPATH='prices.0',
+prices_1 INT(6) JPATH='prices.1',
+prices_2 INT(6) JPATH='prices.2',
+prices_3 INT(6) JPATH='prices.3',
+prices_4 INT(6) JPATH='prices.4')
ENGINE=CONNECT TABLE_TYPE=MONGO TABNAME='testcoll' DATA_CHARSET=utf8
OPTION_LIST='Driver=Java,Version=3' ;
INSERT INTO t1 VALUES
diff --git a/storage/connect/mysql-test/connect/r/odbc_oracle.result b/storage/connect/mysql-test/connect/r/odbc_oracle.result
index 8dc7dc07bb1..acb7d9a74c9 100644
--- a/storage/connect/mysql-test/connect/r/odbc_oracle.result
+++ b/storage/connect/mysql-test/connect/r/odbc_oracle.result
@@ -10,7 +10,7 @@ SET NAMES utf8;
# All tables in all schemas (filtered with WHERE)
CREATE TABLE t1 ENGINE=CONNECT
-TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr'
+TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr'
CATFUNC=Tables;
SELECT * FROM t1 WHERE Table_Schema='MTR' ORDER BY Table_Schema, Table_Name;
Table_Cat Table_Schema Table_Name Table_Type Remark
@@ -20,7 +20,7 @@ NULL MTR V1 VIEW NULL
DROP TABLE t1;
# All tables in all schemas (filtered with WHERE)
CREATE TABLE t1 ENGINE=CONNECT
-TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr'
+TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr'
CATFUNC=Tables TABNAME='%.%';
SELECT * FROM t1 WHERE Table_Schema='MTR' ORDER BY Table_Schema, Table_Name;
Table_Cat Table_Schema Table_Name Table_Type Remark
@@ -30,7 +30,7 @@ NULL MTR V1 VIEW NULL
DROP TABLE t1;
# All tables "T1" in all schemas (filtered with WHERE)
CREATE TABLE t1 ENGINE=CONNECT
-TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr'
+TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr'
CATFUNC=Tables TABNAME='%.T1';
SELECT * FROM t1 WHERE Table_Schema='MTR' ORDER BY Table_Schema, Table_Name;
Table_Cat Table_Schema Table_Name Table_Type Remark
@@ -38,7 +38,7 @@ NULL MTR T1 TABLE NULL
DROP TABLE t1;
# All tables "T1" in all schemas (filtered with WHERE)
CREATE TABLE t1 ENGINE=CONNECT
-TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr'
+TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr'
CATFUNC=Tables TABNAME='T1';
SELECT * FROM t1 WHERE Table_Schema='MTR' ORDER BY Table_Schema, Table_Name;
Table_Cat Table_Schema Table_Name Table_Type Remark
@@ -46,7 +46,7 @@ NULL MTR T1 TABLE NULL
DROP TABLE t1;
# Table "T1" in the schema "MTR"
CREATE TABLE t1 ENGINE=CONNECT
-TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr'
+TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr'
CATFUNC=Tables TABNAME='MTR.T1';
SELECT * FROM t1 ORDER BY Table_Schema, Table_Name;
Table_Cat Table_Schema Table_Name Table_Type Remark
@@ -54,7 +54,7 @@ NULL MTR T1 TABLE NULL
DROP TABLE t1;
# All tables in the schema "MTR"
CREATE TABLE t1 ENGINE=CONNECT
-TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr'
+TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr'
CATFUNC=Tables TABNAME='MTR.%';
SELECT * FROM t1 ORDER BY Table_Schema, Table_Name;
Table_Cat Table_Schema Table_Name Table_Type Remark
@@ -68,7 +68,7 @@ DROP TABLE t1;
# All columns in all schemas (limited with WHERE)
CREATE TABLE t1 ENGINE=CONNECT
-TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr'
+TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr'
CATFUNC=Columns;
SELECT * FROM t1 WHERE Table_Schema='MTR' ORDER BY Table_Schema, Table_Name;
Table_Cat Table_Schema Table_Name Column_Name Data_Type Type_Name Column_Size Buffer_Length Decimal_Digits Radix Nullable Remarks
@@ -80,7 +80,7 @@ Table_Cat Table_Schema Table_Name Column_Name Data_Type Type_Name Column_Size Bu
DROP TABLE t1;
# All columns in all schemas (limited with WHERE)
CREATE TABLE t1 ENGINE=CONNECT
-TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr'
+TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr'
CATFUNC=Columns TABNAME='%.%';
SELECT * FROM t1 WHERE Table_Schema='MTR' ORDER BY Table_Schema, Table_Name;
Table_Cat Table_Schema Table_Name Column_Name Data_Type Type_Name Column_Size Buffer_Length Decimal_Digits Radix Nullable Remarks
@@ -91,7 +91,7 @@ Table_Cat Table_Schema Table_Name Column_Name Data_Type Type_Name Column_Size Bu
MTR V1 B 6 NUMBER 38 40 NULL NULL 1
DROP TABLE t1;
# All tables "T1" in all schemas (limited with WHERE)
-CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' CATFUNC=Columns TABNAME='%.T1';
+CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' CATFUNC=Columns TABNAME='%.T1';
SELECT * FROM t1 WHERE Table_Schema='MTR' ORDER BY Table_Schema, Table_Name;
Table_Cat Table_Schema Table_Name Column_Name Data_Type Type_Name Column_Size Buffer_Length Decimal_Digits Radix Nullable Remarks
MTR T1 A 3 DECIMAL 38 40 0 10 1
@@ -99,7 +99,7 @@ Table_Cat Table_Schema Table_Name Column_Name Data_Type Type_Name Column_Size Bu
DROP TABLE t1;
# Table "T1" in the schema "MTR"
CREATE TABLE t1 ENGINE=CONNECT
-TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr'
+TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr'
CATFUNC=Columns TABNAME='MTR.T1';
SELECT * FROM t1 ORDER BY Table_Schema, Table_Name;
Table_Cat Table_Schema Table_Name Column_Name Data_Type Type_Name Column_Size Buffer_Length Decimal_Digits Radix Nullable Remarks
@@ -108,7 +108,7 @@ Table_Cat Table_Schema Table_Name Column_Name Data_Type Type_Name Column_Size Bu
DROP TABLE t1;
# All tables "T1" in all schemas (filtered with WHERE)
CREATE TABLE t1 ENGINE=CONNECT
-TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr'
+TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr'
CATFUNC=Columns TABNAME='%.T1';
SELECT * FROM t1 WHERE Table_Schema='MTR' ORDER BY Table_Schema, Table_Name;
Table_Cat Table_Schema Table_Name Column_Name Data_Type Type_Name Column_Size Buffer_Length Decimal_Digits Radix Nullable Remarks
@@ -121,14 +121,14 @@ DROP TABLE t1;
# Table "T1" in the default schema ("MTR")
CREATE TABLE t1 ENGINE=CONNECT
-TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr'
+TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr'
TABNAME='T1';
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`A` decimal(40,0) DEFAULT NULL,
`B` double DEFAULT NULL
-) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' `TABLE_TYPE`='ODBC' `TABNAME`='T1'
+) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' `TABLE_TYPE`='ODBC' `TABNAME`='T1'
SELECT * FROM t1 ORDER BY A;
A B
10 1000000000
@@ -157,14 +157,14 @@ DROP VIEW v1;
DROP TABLE t1;
# Table "T1" in the schema "MTR"
CREATE TABLE t1 ENGINE=CONNECT
-TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr'
+TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr'
TABNAME='MTR.T1';
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`A` decimal(40,0) DEFAULT NULL,
`B` double DEFAULT NULL
-) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' `TABLE_TYPE`='ODBC' `TABNAME`='MTR.T1'
+) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' `TABLE_TYPE`='ODBC' `TABNAME`='MTR.T1'
SELECT * FROM t1;
A B
10 1000000000
@@ -173,14 +173,14 @@ A B
DROP TABLE t1;
# View "V1" in the schema "MTR"
CREATE TABLE t1 ENGINE=CONNECT
-TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr'
+TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr'
TABNAME='MTR.V1';
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`A` decimal(40,0) DEFAULT NULL,
`B` double DEFAULT NULL
-) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' `TABLE_TYPE`='ODBC' `TABNAME`='MTR.V1'
+) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' `TABLE_TYPE`='ODBC' `TABNAME`='MTR.V1'
SELECT * FROM t1;
A B
10 1000000000
@@ -209,13 +209,13 @@ DROP VIEW v1;
DROP TABLE t1;
# Table "T2" in the schema "MTR"
CREATE TABLE t1 ENGINE=CONNECT
-TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr'
+TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr'
TABNAME='MTR.T2';
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`A` varchar(64) DEFAULT NULL
-) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' `TABLE_TYPE`='ODBC' `TABNAME`='MTR.T2'
+) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' `TABLE_TYPE`='ODBC' `TABNAME`='MTR.T2'
SELECT * FROM t1;
A
test
diff --git a/storage/connect/mysql-test/connect/r/rest.result b/storage/connect/mysql-test/connect/r/rest.result
new file mode 100644
index 00000000000..3c4ec80ce71
--- /dev/null
+++ b/storage/connect/mysql-test/connect/r/rest.result
@@ -0,0 +1,19 @@
+#
+# Testing REST query
+#
+CREATE TABLE t1
+ENGINE=CONNECT DATA_CHARSET=utf8 TABLE_TYPE=JSON FILE_NAME='users.json'
+HTTP='http://jsonplaceholder.typicode.com/users';
+SELECT * FROM t1;
+id name username email address_street address_suite address_city address_zipcode address_geo_lat address_geo_lng phone website company_name company_catchPhrase company_bs
+1 Leanne Graham Bret Sincere@april.biz Kulas Light Apt. 556 Gwenborough 92998-3874 -37.3159 81.1496 1-770-736-8031 x56442 hildegard.org Romaguera-Crona Multi-layered client-server neural-net harness real-time e-markets
+2 Ervin Howell Antonette Shanna@melissa.tv Victor Plains Suite 879 Wisokyburgh 90566-7771 -43.9509 -34.4618 010-692-6593 x09125 anastasia.net Deckow-Crist Proactive didactic contingency synergize scalable supply-chains
+3 Clementine Bauch Samantha Nathan@yesenia.net Douglas Extension Suite 847 McKenziehaven 59590-4157 -68.6102 -47.0653 1-463-123-4447 ramiro.info Romaguera-Jacobson Face to face bifurcated interface e-enable strategic applications
+4 Patricia Lebsack Karianne Julianne.OConner@kory.org Hoeger Mall Apt. 692 South Elvis 53919-4257 29.4572 -164.2990 493-170-9623 x156 kale.biz Robel-Corkery Multi-tiered zero tolerance productivity transition cutting-edge web services
+5 Chelsey Dietrich Kamren Lucio_Hettinger@annie.ca Skiles Walks Suite 351 Roscoeview 33263 -31.8129 62.5342 (254)954-1289 demarco.info Keebler LLC User-centric fault-tolerant solution revolutionize end-to-end systems
+6 Mrs. Dennis Schulist Leopoldo_Corkery Karley_Dach@jasper.info Norberto Crossing Apt. 950 South Christy 23505-1337 -71.4197 71.7478 1-477-935-8478 x6430 ola.org Considine-Lockman Synchronised bottom-line interface e-enable innovative applications
+7 Kurtis Weissnat Elwyn.Skiles Telly.Hoeger@billy.biz Rex Trail Suite 280 Howemouth 58804-1099 24.8918 21.8984 210.067.6132 elvis.io Johns Group Configurable multimedia task-force generate enterprise e-tailers
+8 Nicholas Runolfsdottir V Maxime_Nienow Sherwood@rosamond.me Ellsworth Summit Suite 729 Aliyaview 45169 -14.3990 -120.7677 586.493.6943 x140 jacynthe.com Abernathy Group Implemented secondary concept e-enable extensible e-tailers
+9 Glenna Reichert Delphine Chaim_McDermott@dana.io Dayna Park Suite 449 Bartholomebury 76495-3109 24.6463 -168.8889 (775)976-6794 x41206 conrad.com Yost and Sons Switchable contextually-based project aggregate real-time technologies
+10 Clementina DuBuque Moriah.Stanton Rey.Padberg@karina.biz Kattie Turnpike Suite 198 Lebsackbury 31428-2261 -38.2386 57.2232 024-648-3804 ambrose.net Hoeger LLC Centralized empowering task-force target end-to-end models
+DROP TABLE t1;
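
The new rest.result above exercises REST retrieval: the JSON document is fetched from the URL given in the HTTP option, kept under FILE_NAME, and column discovery then flattens the nested address and company objects into the address_* and company_* columns shown. A hedged sketch of the same pattern (users_rest is an illustrative name; the endpoint is the one used by the test):

CREATE TABLE users_rest
ENGINE=CONNECT DATA_CHARSET=utf8 TABLE_TYPE=JSON FILE_NAME='users.json'
HTTP='http://jsonplaceholder.typicode.com/users';
-- Discovery derives the columns (id, name, username, email, address_street, ...)
SELECT name, email FROM users_rest LIMIT 3;
DROP TABLE users_rest;
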
diff --git a/storage/connect/mysql-test/connect/r/xml.result b/storage/connect/mysql-test/connect/r/xml.result
index 6a0c9db27b3..575c903bbbc 100644
--- a/storage/connect/mysql-test/connect/r/xml.result
+++ b/storage/connect/mysql-test/connect/r/xml.result
@@ -374,8 +374,7 @@ INSERT INTO t1 VALUES (_cp1251 0xC0C1C2C3);
Warnings:
Level Warning
Code 1105
-Message Com error: Unable to save character to 'iso-8859-1' encoding.
-
+Message warning about characters outside of iso-8859-1
INSERT INTO t1 VALUES ('&<>"\'');
SELECT node, hex(node) FROM t1;
node &<>"'
diff --git a/storage/connect/mysql-test/connect/r/xml2.result b/storage/connect/mysql-test/connect/r/xml2.result
index f7bbc17c8a0..891c6e6f8dd 100644
--- a/storage/connect/mysql-test/connect/r/xml2.result
+++ b/storage/connect/mysql-test/connect/r/xml2.result
@@ -87,9 +87,9 @@ DROP TABLE t1;
# Testing mixed tag and attribute values
#
CREATE TABLE t1 (
-ISBN CHAR(15) FIELD_FORMAT='@',
-LANG CHAR(2) FIELD_FORMAT='@',
-SUBJECT CHAR(32) FIELD_FORMAT='@',
+ISBN CHAR(15) XPATH='@',
+LANG CHAR(2) XPATH='@',
+SUBJECT CHAR(32) XPATH='@',
AUTHOR CHAR(50),
TITLE CHAR(32),
TRANSLATOR CHAR(40),
@@ -120,9 +120,9 @@ DROP TABLE t1;
# Testing INSERT on mixed tag and attribute values
#
CREATE TABLE t1 (
-ISBN CHAR(15) FIELD_FORMAT='@',
-LANG CHAR(2) FIELD_FORMAT='@',
-SUBJECT CHAR(32) FIELD_FORMAT='@',
+ISBN CHAR(15) XPATH='@',
+LANG CHAR(2) XPATH='@',
+SUBJECT CHAR(32) XPATH='@',
AUTHOR CHAR(50),
TITLE CHAR(32),
TRANSLATOR CHAR(40),
@@ -207,18 +207,18 @@ DROP TABLE t1;
# Testing XPath
#
CREATE TABLE t1 (
-isbn CHAR(15) FIELD_FORMAT='@ISBN',
-language CHAR(2) FIELD_FORMAT='@LANG',
-subject CHAR(32) FIELD_FORMAT='@SUBJECT',
-authorfn CHAR(20) FIELD_FORMAT='AUTHOR/FIRSTNAME',
-authorln CHAR(20) FIELD_FORMAT='AUTHOR/LASTNAME',
-title CHAR(32) FIELD_FORMAT='TITLE',
-translated CHAR(32) FIELD_FORMAT='TRANSLATOR/@PREFIX',
-tranfn CHAR(20) FIELD_FORMAT='TRANSLATOR/FIRSTNAME',
-tranln CHAR(20) FIELD_FORMAT='TRANSLATOR/LASTNAME',
-publisher CHAR(20) FIELD_FORMAT='PUBLISHER/NAME',
-location CHAR(20) FIELD_FORMAT='PUBLISHER/PLACE',
-year INT(4) FIELD_FORMAT='DATEPUB'
+isbn CHAR(15) XPATH='@ISBN',
+language CHAR(2) XPATH='@LANG',
+subject CHAR(32) XPATH='@SUBJECT',
+authorfn CHAR(20) XPATH='AUTHOR/FIRSTNAME',
+authorln CHAR(20) XPATH='AUTHOR/LASTNAME',
+title CHAR(32) XPATH='TITLE',
+translated CHAR(32) XPATH='TRANSLATOR/@PREFIX',
+tranfn CHAR(20) XPATH='TRANSLATOR/FIRSTNAME',
+tranln CHAR(20) XPATH='TRANSLATOR/LASTNAME',
+publisher CHAR(20) XPATH='PUBLISHER/NAME',
+location CHAR(20) XPATH='PUBLISHER/PLACE',
+year INT(4) XPATH='DATEPUB'
) ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample.xml'
TABNAME='BIBLIO' OPTION_LIST='rownode=BOOK,skipnull=1,xmlsup=libxml2';
SELECT * FROM t1;
@@ -260,7 +260,7 @@ DROP TABLE t1;
#
CREATE TABLE t1
(
-isbn CHAR(15) FIELD_FORMAT='@isbn'
+isbn CHAR(15) XPATH='@isbn'
) ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample.xml'
TABNAME='BIBLIO' OPTION_LIST='rownode=BOOK,skipnull=1,xmlsup=libxml2';
SELECT * FROM t1;
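
For XML tables the same rename applies with XPATH replacing FIELD_FORMAT, as the hunks above show: '@' (or '@NAME') addresses an attribute of the row node, a bare name a child element, and 'A/B' a nested element. A minimal sketch against the xsample.xml file shipped with the test suite (column names are illustrative):

CREATE TABLE books_xml (
  isbn     CHAR(15) XPATH='@ISBN',            -- attribute of the BOOK row node
  title    CHAR(32) XPATH='TITLE',            -- child element
  authorfn CHAR(20) XPATH='AUTHOR/FIRSTNAME'  -- nested child element
) ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample.xml'
TABNAME='BIBLIO' OPTION_LIST='rownode=BOOK,skipnull=1,xmlsup=libxml2';
SELECT * FROM books_xml;
DROP TABLE books_xml;
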
diff --git a/storage/connect/mysql-test/connect/r/xml2_html.result b/storage/connect/mysql-test/connect/r/xml2_html.result
index 143f46529f6..499108b724d 100644
--- a/storage/connect/mysql-test/connect/r/xml2_html.result
+++ b/storage/connect/mysql-test/connect/r/xml2_html.result
@@ -5,9 +5,9 @@ SET NAMES utf8;
# Testing HTML like XML file
#
CREATE TABLE beers (
-`Name` CHAR(16) FIELD_FORMAT='brandName',
-`Origin` CHAR(16) FIELD_FORMAT='origin',
-`Description` CHAR(32) FIELD_FORMAT='details')
+`Name` CHAR(16) XPATH='brandName',
+`Origin` CHAR(16) XPATH='origin',
+`Description` CHAR(32) XPATH='details')
ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='beers.xml'
TABNAME='table' OPTION_LIST='xmlsup=libxml2,rownode=tr,colnode=td';
SELECT * FROM beers;
diff --git a/storage/connect/mysql-test/connect/r/xml2_mult.result b/storage/connect/mysql-test/connect/r/xml2_mult.result
index 07c86d961e1..0146baa89c0 100644
--- a/storage/connect/mysql-test/connect/r/xml2_mult.result
+++ b/storage/connect/mysql-test/connect/r/xml2_mult.result
@@ -5,9 +5,9 @@ SET NAMES utf8;
# Testing expanded values
#
CREATE TABLE `bookstore` (
-`category` CHAR(16) NOT NULL FIELD_FORMAT='@',
+`category` CHAR(16) NOT NULL XPATH='@',
`title` VARCHAR(50) NOT NULL,
-`lang` char(2) NOT NULL FIELD_FORMAT='title/@',
+`lang` char(2) NOT NULL XPATH='title/@',
`author` VARCHAR(24) NOT NULL,
`year` INT(4) NOT NULL,
`price` DOUBLE(8,2) NOT NULL)
diff --git a/storage/connect/mysql-test/connect/r/xml2_zip.result b/storage/connect/mysql-test/connect/r/xml2_zip.result
index f176149c53f..e743af32418 100644
--- a/storage/connect/mysql-test/connect/r/xml2_zip.result
+++ b/storage/connect/mysql-test/connect/r/xml2_zip.result
@@ -4,20 +4,20 @@ Warning 1105 No file name. Table will use t1.xml
# Testing zipped XML tables
#
CREATE TABLE t1 (
-ISBN CHAR(13) NOT NULL FIELD_FORMAT='@',
-LANG CHAR(2) NOT NULL FIELD_FORMAT='@',
-SUBJECT CHAR(12) NOT NULL FIELD_FORMAT='@',
-AUTHOR_FIRSTNAME CHAR(15) NOT NULL FIELD_FORMAT='AUTHOR/FIRSTNAME',
-AUTHOR_LASTNAME CHAR(8) NOT NULL FIELD_FORMAT='AUTHOR/LASTNAME',
-TRANSLATOR_PREFIX CHAR(24) DEFAULT NULL FIELD_FORMAT='TRANSLATOR/@PREFIX',
-TRANSLATOR_FIRSTNAME CHAR(6) DEFAULT NULL FIELD_FORMAT='TRANSLATOR/FIRSTNAME',
-TRANSLATOR_LASTNAME CHAR(6) DEFAULT NULL FIELD_FORMAT='TRANSLATOR/LASTNAME',
+ISBN CHAR(13) NOT NULL XPATH='@',
+LANG CHAR(2) NOT NULL XPATH='@',
+SUBJECT CHAR(12) NOT NULL XPATH='@',
+AUTHOR_FIRSTNAME CHAR(15) NOT NULL XPATH='AUTHOR/FIRSTNAME',
+AUTHOR_LASTNAME CHAR(8) NOT NULL XPATH='AUTHOR/LASTNAME',
+TRANSLATOR_PREFIX CHAR(24) DEFAULT NULL XPATH='TRANSLATOR/@PREFIX',
+TRANSLATOR_FIRSTNAME CHAR(6) DEFAULT NULL XPATH='TRANSLATOR/FIRSTNAME',
+TRANSLATOR_LASTNAME CHAR(6) DEFAULT NULL XPATH='TRANSLATOR/LASTNAME',
TITLE CHAR(30) NOT NULL,
-PUBLISHER_NAME CHAR(15) NOT NULL FIELD_FORMAT='PUBLISHER/NAME',
-PUBLISHER_PLACE CHAR(5) NOT NULL FIELD_FORMAT='PUBLISHER/PLACE',
+PUBLISHER_NAME CHAR(15) NOT NULL XPATH='PUBLISHER/NAME',
+PUBLISHER_PLACE CHAR(5) NOT NULL XPATH='PUBLISHER/PLACE',
DATEPUB CHAR(4) NOT NULL
) ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample2.zip' ZIPPED=YES
-OPTION_LIST='entry=xsample2.xml,load=xsample2.xml,rownode=BOOK,xmlsup=libxml2,expand=1,mulnode=AUTHOR';
+OPTION_LIST='depth=0,entry=xsample2.xml,load=xsample2.xml,rownode=BOOK,xmlsup=libxml2,expand=1,mulnode=AUTHOR';
SELECT * FROM t1;
ISBN 9782212090819
LANG fr
@@ -69,7 +69,7 @@ PUBLISHER_PLACE Paris
DATEPUB 2003
CREATE TABLE t2
ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample2.zip' ZIPPED=YES
-OPTION_LIST='xmlsup=libxml2';
+OPTION_LIST='depth=0,xmlsup=libxml2';
SELECT * FROM t2;
ISBN 9782212090819
LANG fr
diff --git a/storage/connect/mysql-test/connect/r/xml_html.result b/storage/connect/mysql-test/connect/r/xml_html.result
index 4b984a49901..308c67ffc28 100644
--- a/storage/connect/mysql-test/connect/r/xml_html.result
+++ b/storage/connect/mysql-test/connect/r/xml_html.result
@@ -3,9 +3,9 @@ SET NAMES utf8;
# Testing HTML like XML file
#
CREATE TABLE beers (
-`Name` CHAR(16) FIELD_FORMAT='brandName',
-`Origin` CHAR(16) FIELD_FORMAT='origin',
-`Description` CHAR(32) FIELD_FORMAT='details')
+`Name` CHAR(16) XPATH='brandName',
+`Origin` CHAR(16) XPATH='origin',
+`Description` CHAR(32) XPATH='details')
ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='beers.xml'
TABNAME='table' OPTION_LIST='xmlsup=domdoc,rownode=tr,colnode=td';
SELECT * FROM beers;
diff --git a/storage/connect/mysql-test/connect/r/xml_mult.result b/storage/connect/mysql-test/connect/r/xml_mult.result
index c786a80819c..9cdc36dea6b 100644
--- a/storage/connect/mysql-test/connect/r/xml_mult.result
+++ b/storage/connect/mysql-test/connect/r/xml_mult.result
@@ -3,9 +3,9 @@ SET NAMES utf8;
# Testing expanded values
#
CREATE TABLE `bookstore` (
-`category` CHAR(16) NOT NULL FIELD_FORMAT='@',
+`category` CHAR(16) NOT NULL XPATH='@',
`title` VARCHAR(50) NOT NULL,
-`lang` char(2) NOT NULL FIELD_FORMAT='title/@',
+`lang` char(2) NOT NULL XPATH='title/@',
`author` VARCHAR(24) NOT NULL,
`year` INT(4) NOT NULL,
`price` DOUBLE(8,2) NOT NULL)
diff --git a/storage/connect/mysql-test/connect/r/xml_zip.result b/storage/connect/mysql-test/connect/r/xml_zip.result
index f7790e4cfff..5f17249b390 100644
--- a/storage/connect/mysql-test/connect/r/xml_zip.result
+++ b/storage/connect/mysql-test/connect/r/xml_zip.result
@@ -2,20 +2,20 @@
# Testing zipped XML tables
#
CREATE TABLE t1 (
-ISBN CHAR(13) NOT NULL FIELD_FORMAT='@',
-LANG CHAR(2) NOT NULL FIELD_FORMAT='@',
-SUBJECT CHAR(12) NOT NULL FIELD_FORMAT='@',
-AUTHOR_FIRSTNAME CHAR(15) NOT NULL FIELD_FORMAT='AUTHOR/FIRSTNAME',
-AUTHOR_LASTNAME CHAR(8) NOT NULL FIELD_FORMAT='AUTHOR/LASTNAME',
-TRANSLATOR_PREFIX CHAR(24) DEFAULT NULL FIELD_FORMAT='TRANSLATOR/@PREFIX',
-TRANSLATOR_FIRSTNAME CHAR(6) DEFAULT NULL FIELD_FORMAT='TRANSLATOR/FIRSTNAME',
-TRANSLATOR_LASTNAME CHAR(6) DEFAULT NULL FIELD_FORMAT='TRANSLATOR/LASTNAME',
+ISBN CHAR(13) NOT NULL XPATH='@',
+LANG CHAR(2) NOT NULL XPATH='@',
+SUBJECT CHAR(12) NOT NULL XPATH='@',
+AUTHOR_FIRSTNAME CHAR(15) NOT NULL XPATH='AUTHOR/FIRSTNAME',
+AUTHOR_LASTNAME CHAR(8) NOT NULL XPATH='AUTHOR/LASTNAME',
+TRANSLATOR_PREFIX CHAR(24) DEFAULT NULL XPATH='TRANSLATOR/@PREFIX',
+TRANSLATOR_FIRSTNAME CHAR(6) DEFAULT NULL XPATH='TRANSLATOR/FIRSTNAME',
+TRANSLATOR_LASTNAME CHAR(6) DEFAULT NULL XPATH='TRANSLATOR/LASTNAME',
TITLE CHAR(30) NOT NULL,
-PUBLISHER_NAME CHAR(15) NOT NULL FIELD_FORMAT='PUBLISHER/NAME',
-PUBLISHER_PLACE CHAR(5) NOT NULL FIELD_FORMAT='PUBLISHER/PLACE',
+PUBLISHER_NAME CHAR(15) NOT NULL XPATH='PUBLISHER/NAME',
+PUBLISHER_PLACE CHAR(5) NOT NULL XPATH='PUBLISHER/PLACE',
DATEPUB CHAR(4) NOT NULL
) ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample2.zip' ZIPPED=YES
-OPTION_LIST='entry=xsample2.xml,load=xsample2.xml,rownode=BOOK,xmlsup=domdoc,expand=1,mulnode=AUTHOR';
+OPTION_LIST='depth=0,entry=xsample2.xml,load=xsample2.xml,rownode=BOOK,xmlsup=domdoc,expand=1,mulnode=AUTHOR';
SELECT * FROM t1;
ISBN 9782212090819
LANG fr
@@ -67,7 +67,7 @@ PUBLISHER_PLACE Paris
DATEPUB 2003
CREATE TABLE t2
ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample2.zip' ZIPPED=YES
-OPTION_LIST='xmlsup=domdoc';
+OPTION_LIST='depth=0,xmlsup=domdoc';
SELECT * FROM t2;
ISBN 9782212090819
LANG fr
diff --git a/storage/connect/mysql-test/connect/r/zip.result b/storage/connect/mysql-test/connect/r/zip.result
index c81546a4689..c696252ca43 100644
--- a/storage/connect/mysql-test/connect/r/zip.result
+++ b/storage/connect/mysql-test/connect/r/zip.result
@@ -171,32 +171,32 @@ DROP TABLE t1,t2,t3,t4;
#
CREATE TABLE t1 (
_id INT(2) NOT NULL,
-name_first CHAR(9) NOT NULL FIELD_FORMAT='$.name.first',
-name_aka CHAR(4) DEFAULT NULL FIELD_FORMAT='$.name.aka',
-name_last CHAR(10) NOT NULL FIELD_FORMAT='$.name.last',
+name_first CHAR(9) NOT NULL JPATH='$.name.first',
+name_aka CHAR(4) DEFAULT NULL JPATH='$.name.aka',
+name_last CHAR(10) NOT NULL JPATH='$.name.last',
title CHAR(12) DEFAULT NULL,
birth CHAR(20) DEFAULT NULL,
death CHAR(20) DEFAULT NULL,
-contribs CHAR(7) NOT NULL FIELD_FORMAT='$.contribs',
-awards_award CHAR(42) DEFAULT NULL FIELD_FORMAT='$.awards.award',
-awards_year CHAR(4) DEFAULT NULL FIELD_FORMAT='$.awards.year',
-awards_by CHAR(38) DEFAULT NULL FIELD_FORMAT='$.awards.by'
+contribs CHAR(50) NOT NULL JPATH='$.contribs',
+awards_award CHAR(42) DEFAULT NULL JPATH='$.awards.award',
+awards_year CHAR(4) DEFAULT NULL JPATH='$.awards.year',
+awards_by CHAR(38) DEFAULT NULL JPATH='$.awards.by'
) ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='bios.zip' OPTION_LIST='ENTRY=bios.json,LOAD=bios.json' ZIPPED=YES;
SELECT * FROM t1;
_id name_first name_aka name_last title birth death contribs awards_award awards_year awards_by
-1 John NULL Backus NULL 1924-12-03T05:00:00Z 2007-03-17T04:00:00Z Fortran W.W. McDowell Award 1967 IEEE Computer Society
-2 John NULL McCarthy NULL 1927-09-04T04:00:00Z 2011-12-24T05:00:00Z Lisp Turing Award 1971 ACM
-3 Grace NULL Hopper Rear Admiral 1906-12-09T05:00:00Z 1992-01-01T05:00:00Z UNIVAC Computer Sciences Man of the Year 1969 Data Processing Management Association
-4 Kristen NULL Nygaard NULL 1926-08-27T04:00:00Z 2002-08-10T04:00:00Z OOP Rosing Prize 1999 Norwegian Data Association
-5 Ole-Johan NULL Dahl NULL 1931-10-12T04:00:00Z 2002-06-29T04:00:00Z OOP Rosing Prize 1999 Norwegian Data Association
+1 John NULL Backus NULL 1924-12-03T05:00:00Z 2007-03-17T04:00:00Z Fortran, ALGOL, Backus-Naur Form, FP W.W. McDowell Award 1967 IEEE Computer Society
+2 John NULL McCarthy NULL 1927-09-04T04:00:00Z 2011-12-24T05:00:00Z Lisp, Artificial Intelligence, ALGOL Turing Award 1971 ACM
+3 Grace NULL Hopper Rear Admiral 1906-12-09T05:00:00Z 1992-01-01T05:00:00Z UNIVAC, compiler, FLOW-MATIC, COBOL Computer Sciences Man of the Year 1969 Data Processing Management Association
+4 Kristen NULL Nygaard NULL 1926-08-27T04:00:00Z 2002-08-10T04:00:00Z OOP, Simula Rosing Prize 1999 Norwegian Data Association
+5 Ole-Johan NULL Dahl NULL 1931-10-12T04:00:00Z 2002-06-29T04:00:00Z OOP, Simula Rosing Prize 1999 Norwegian Data Association
6 Guido NULL van Rossum NULL 1956-01-31T05:00:00Z NULL Python Award for the Advancement of Free Software 2001 Free Software Foundation
-7 Dennis NULL Ritchie NULL 1941-09-09T04:00:00Z 2011-10-12T04:00:00Z UNIX Turing Award 1983 ACM
+7 Dennis NULL Ritchie NULL 1941-09-09T04:00:00Z 2011-10-12T04:00:00Z UNIX, C Turing Award 1983 ACM
8 Yukihiro Matz Matsumoto NULL 1965-04-14T04:00:00Z NULL Ruby Award for the Advancement of Free Software 2011 Free Software Foundation
9 James NULL Gosling NULL 1955-05-19T04:00:00Z NULL Java The Economist Innovation Award 2002 The Economist
10 Martin NULL Odersky NULL NULL NULL Scala NULL NULL NULL
CREATE TABLE t2
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='bios.zip' ZIPPED=1
-OPTION_LIST='LEVEL=5';
+OPTION_LIST='DEPTH=5';
SELECT * FROM t2;
_id name_first name_aka name_last title birth death contribs awards_award awards_year awards_by
1 John NULL Backus NULL 1924-12-03T05:00:00Z 2007-03-17T04:00:00Z Fortran W.W. McDowell Award 1967 IEEE Computer Society
@@ -211,16 +211,16 @@ _id name_first name_aka name_last title birth death contribs awards_award awards
10 Martin NULL Odersky NULL NULL NULL Scala NULL NULL NULL
CREATE TABLE t3 (
_id INT(2) NOT NULL,
-firstname CHAR(9) NOT NULL FIELD_FORMAT='$.name.first',
-aka CHAR(4) DEFAULT NULL FIELD_FORMAT='$.name.aka',
-lastname CHAR(10) NOT NULL FIELD_FORMAT='$.name.last',
+firstname CHAR(9) NOT NULL JPATH='$.name.first',
+aka CHAR(4) DEFAULT NULL JPATH='$.name.aka',
+lastname CHAR(10) NOT NULL JPATH='$.name.last',
title CHAR(12) DEFAULT NULL,
birth date DEFAULT NULL date_format="YYYY-DD-MM'T'hh:mm:ss'Z'",
death date DEFAULT NULL date_format="YYYY-DD-MM'T'hh:mm:ss'Z'",
-contribs CHAR(64) NOT NULL FIELD_FORMAT='$.contribs.[", "]',
-award CHAR(42) DEFAULT NULL FIELD_FORMAT='$.awards[*].award',
-year CHAR(4) DEFAULT NULL FIELD_FORMAT='$.awards[*].year',
-`by` CHAR(38) DEFAULT NULL FIELD_FORMAT='$.awards[*].by'
+contribs CHAR(64) NOT NULL JPATH='$.contribs.[", "]',
+award CHAR(42) DEFAULT NULL JPATH='$.awards[*].award',
+year CHAR(4) DEFAULT NULL JPATH='$.awards[*].year',
+`by` CHAR(38) DEFAULT NULL JPATH='$.awards[*].by'
) ENGINE=CONNECT TABLE_TYPE='json' FILE_NAME='bios.zip' ZIPPED=YES;
SELECT * FROM t3 WHERE _id = 1;
_id firstname aka lastname title birth death contribs award year by
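
In the zipped-JSON results above, the discovery option is likewise renamed from LEVEL to DEPTH, and the explicit contribs column is widened because its expected values now carry the whole array rather than only the first element. A sketch of depth-based discovery on the same bios.zip archive (bios_discovered is an illustrative name):

CREATE TABLE bios_discovered
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='bios.zip' ZIPPED=1
OPTION_LIST='DEPTH=5';               -- DEPTH replaces the former LEVEL option
SELECT _id, name_first, name_last, contribs FROM bios_discovered LIMIT 3;
DROP TABLE bios_discovered;
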
diff --git a/storage/connect/mysql-test/connect/t/alter_xml.test b/storage/connect/mysql-test/connect/t/alter_xml.test
index 8b2164d5548..4c2e1670f4c 100644
--- a/storage/connect/mysql-test/connect/t/alter_xml.test
+++ b/storage/connect/mysql-test/connect/t/alter_xml.test
@@ -21,7 +21,7 @@ SELECT * FROM t2;
--echo # NOTE: The first (ignored) row is due to the remaining HEADER=1 option.
--echo # Testing field option modification
-ALTER TABLE t1 MODIFY d CHAR(10) NOT NULL FIELD_FORMAT='@', HEADER=0;
+ALTER TABLE t1 MODIFY d CHAR(10) NOT NULL XPATH='@', HEADER=0;
SELECT * FROM t1;
SHOW CREATE TABLE t1;
SELECT * FROM t2;
diff --git a/storage/connect/mysql-test/connect/t/alter_xml2.test b/storage/connect/mysql-test/connect/t/alter_xml2.test
index d67c80c4e9f..ec4065baa47 100644
--- a/storage/connect/mysql-test/connect/t/alter_xml2.test
+++ b/storage/connect/mysql-test/connect/t/alter_xml2.test
@@ -21,7 +21,7 @@ SELECT * FROM t2;
--echo # NOTE: The first (ignored) row is due to the remaining HEADER=1 option.
--echo # Testing field option modification
-ALTER TABLE t1 MODIFY d CHAR(10) NOT NULL FIELD_FORMAT='@', HEADER=0;
+ALTER TABLE t1 MODIFY d CHAR(10) NOT NULL XPATH='@', HEADER=0;
SELECT * FROM t1;
SHOW CREATE TABLE t1;
SELECT * FROM t2;
diff --git a/storage/connect/mysql-test/connect/t/bson.test b/storage/connect/mysql-test/connect/t/bson.test
new file mode 100644
index 00000000000..ab38cab73fc
--- /dev/null
+++ b/storage/connect/mysql-test/connect/t/bson.test
@@ -0,0 +1,294 @@
+--source include/not_embedded.inc
+--source include/have_partition.inc
+
+let $MYSQLD_DATADIR= `select @@datadir`;
+
+--copy_file $MTR_SUITE_DIR/std_data/biblio.json $MYSQLD_DATADIR/test/biblio.json
+--copy_file $MTR_SUITE_DIR/std_data/bib0.json $MYSQLD_DATADIR/test/bib0.json
+--copy_file $MTR_SUITE_DIR/std_data/expense.json $MYSQLD_DATADIR/test/expense.json
+--copy_file $MTR_SUITE_DIR/std_data/mulexp3.json $MYSQLD_DATADIR/test/mulexp3.json
+--copy_file $MTR_SUITE_DIR/std_data/mulexp4.json $MYSQLD_DATADIR/test/mulexp4.json
+--copy_file $MTR_SUITE_DIR/std_data/mulexp5.json $MYSQLD_DATADIR/test/mulexp5.json
+
+--echo #
+--echo # Testing doc samples
+--echo #
+CREATE TABLE t1
+(
+ ISBN CHAR(15),
+ LANG CHAR(2),
+ SUBJECT CHAR(32),
+ AUTHOR CHAR(64),
+ TITLE CHAR(32),
+ TRANSLATION CHAR(32),
+ TRANSLATOR CHAR(80),
+ PUBLISHER CHAR(32),
+ DATEPUB int(4)
+) ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='biblio.json';
+SELECT * FROM t1;
+DROP TABLE t1;
+
+
+--echo #
+--echo # Testing Jpath. Get the number of authors
+--echo #
+CREATE TABLE t1
+(
+ ISBN CHAR(15),
+ Language CHAR(2) JPATH='$.LANG',
+ Subject CHAR(32) JPATH='$.SUBJECT',
+ Authors INT(2) JPATH='$.AUTHOR[#]',
+ Title CHAR(32) JPATH='$.TITLE',
+ Translation CHAR(32) JPATH='$.TRANSLATION',
+ Translator CHAR(80) JPATH='$.TRANSLATOR',
+ Publisher CHAR(20) JPATH='$.PUBLISHER.NAME',
+ Location CHAR(16) JPATH='$.PUBLISHER.PLACE',
+ Year int(4) JPATH='$.DATEPUB'
+)
+ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='biblio.json';
+SELECT * FROM t1;
+DROP TABLE t1;
+
+--echo #
+--echo # Concatenates the authors
+--echo #
+CREATE TABLE t1
+(
+ ISBN CHAR(15),
+ Language CHAR(2) JPATH='$.LANG',
+ Subject CHAR(32) JPATH='$.SUBJECT',
+ AuthorFN CHAR(128) JPATH='$.AUTHOR[" and "].FIRSTNAME',
+ AuthorLN CHAR(128) JPATH='$.AUTHOR[" and "].LASTNAME',
+ Title CHAR(32) JPATH='$.TITLE',
+ Translation CHAR(32) JPATH='$.TRANSLATION',
+ Translator CHAR(80) JPATH='$.TRANSLATOR',
+ Publisher CHAR(20) JPATH='$.PUBLISHER.NAME',
+ Location CHAR(16) JPATH='$.PUBLISHER.PLACE',
+ Year int(4) JPATH='$.DATEPUB'
+)
+ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='biblio.json';
+SELECT * FROM t1;
+DROP TABLE t1;
+
+--echo #
+--echo # Testing expanding authors
+--echo #
+CREATE TABLE t1
+(
+ ISBN CHAR(15),
+ Language CHAR(2) JPATH='$.LANG',
+ Subject CHAR(32) JPATH='$.SUBJECT',
+ AuthorFN CHAR(128) JPATH='$.AUTHOR[*].FIRSTNAME',
+ AuthorLN CHAR(128) JPATH='$.AUTHOR[*].LASTNAME',
+ Title CHAR(32) JPATH='$.TITLE',
+ Translation CHAR(32) JPATH='$.TRANSLATION',
+ Translator CHAR(80) JPATH='$.TRANSLATOR',
+ Publisher CHAR(20) JPATH='$.PUBLISHER.NAME',
+ Location CHAR(16) JPATH='$.PUBLISHER.PLACE',
+ Year int(4) JPATH='$.DATEPUB'
+)
+ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='biblio.json';
+SELECT * FROM t1;
+UPDATE t1 SET AuthorFN = 'Philippe' WHERE AuthorLN = 'Knab';
+SELECT * FROM t1 WHERE ISBN = '9782212090819';
+
+--echo #
+--echo # To add an author a new table must be created
+--echo #
+CREATE TABLE t2 (
+FIRSTNAME CHAR(32),
+LASTNAME CHAR(32))
+ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='biblio.json' OPTION_LIST='Object=$[1].AUTHOR';
+SELECT * FROM t2;
+INSERT INTO t2 VALUES('Charles','Dickens');
+SELECT * FROM t1;
+DROP TABLE t1;
+DROP TABLE t2;
+
+--echo #
+--echo # Check the biblio file has the good format
+--echo #
+CREATE TABLE t1
+(
+ line char(255)
+)
+ENGINE=CONNECT TABLE_TYPE=DOS FILE_NAME='biblio.json';
+SELECT * FROM t1;
+DROP TABLE t1;
+
+--echo #
+--echo # Testing a pretty=0 file
+--echo #
+CREATE TABLE t1
+(
+ ISBN CHAR(15) NOT NULL,
+ Language CHAR(2) JPATH='$.LANG',
+ Subject CHAR(32) JPATH='$.SUBJECT',
+ AuthorFN CHAR(128) JPATH='$.AUTHOR[*].FIRSTNAME',
+ AuthorLN CHAR(128) JPATH='$.AUTHOR[*].LASTNAME',
+ Title CHAR(32) JPATH='$.TITLE',
+ Translation CHAR(32) JPATH='$.TRANSLATED.PREFIX',
+ TranslatorFN CHAR(80) JPATH='$.TRANSLATED.TRANSLATOR.FIRSTNAME',
+ TranslatorLN CHAR(80) JPATH='$.TRANSLATED.TRANSLATOR.LASTNAME',
+ Publisher CHAR(20) JPATH='$.PUBLISHER.NAME',
+ Location CHAR(16) JPATH='$.PUBLISHER.PLACE',
+ Year int(4) JPATH='$.DATEPUB',
+ INDEX IX(ISBN)
+)
+ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='bib0.json' LRECL=320 OPTION_LIST='Pretty=0';
+SHOW INDEX FROM t1;
+SELECT * FROM t1;
+DESCRIBE SELECT * FROM t1 WHERE ISBN = '9782212090819';
+--error ER_GET_ERRMSG
+UPDATE t1 SET AuthorFN = 'Philippe' WHERE ISBN = '9782212090819';
+DROP TABLE t1;
+
+--echo #
+--echo # A file with 2 arrays
+--echo #
+CREATE TABLE t1 (
+WHO CHAR(12),
+WEEK INT(2) JPATH='$.WEEK[*].NUMBER',
+WHAT CHAR(32) JPATH='$.WEEK[].EXPENSE["+"].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.WEEK[].EXPENSE[+].AMOUNT')
+ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='expense.json';
+SELECT * FROM t1;
+DROP TABLE t1;
+
+--echo #
+--echo # Now it can be fully expanded
+--echo #
+CREATE TABLE t1 (
+WHO CHAR(12),
+WEEK INT(2) JPATH='$.WEEK[*].NUMBER',
+WHAT CHAR(32) JPATH='$.WEEK[*].EXPENSE[*].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.WEEK[*].EXPENSE[*].AMOUNT')
+ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='expense.json';
+#--error ER_GET_ERRMSG
+SELECT * FROM t1;
+DROP TABLE t1;
+
+--echo #
+--echo # A table showing many calculated results
+--echo #
+CREATE TABLE t1 (
+WHO CHAR(12) NOT NULL,
+WEEKS CHAR(12) NOT NULL JPATH='$.WEEK[", "].NUMBER',
+SUMS CHAR(64) NOT NULL JPATH='$.WEEK["+"].EXPENSE[+].AMOUNT',
+SUM DOUBLE(8,2) NOT NULL JPATH='$.WEEK[+].EXPENSE[+].AMOUNT',
+AVGS CHAR(64) NOT NULL JPATH='$.WEEK["+"].EXPENSE[!].AMOUNT',
+SUMAVG DOUBLE(8,2) NOT NULL JPATH='$.WEEK[+].EXPENSE[!].AMOUNT',
+AVGSUM DOUBLE(8,2) NOT NULL JPATH='$.WEEK[!].EXPENSE[+].AMOUNT',
+AVGAVG DOUBLE(8,2) NOT NULL JPATH='$.WEEK[!].EXPENSE[!].AMOUNT',
+AVERAGE DOUBLE(8,2) NOT NULL JPATH='$.WEEK[!].EXPENSE[*].AMOUNT')
+ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='expense.json';
+SELECT * FROM t1;
+DROP TABLE t1;
+
+--echo #
+--echo # Expand expense in 3 one week tables
+--echo #
+CREATE TABLE t2 (
+WHO CHAR(12),
+WEEK INT(2) JPATH='$.WEEK[0].NUMBER',
+WHAT CHAR(32) JPATH='$.WEEK[0].EXPENSE[*].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.WEEK[0].EXPENSE[*].AMOUNT')
+ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='expense.json';
+SELECT * FROM t2;
+
+CREATE TABLE t3 (
+WHO CHAR(12),
+WEEK INT(2) JPATH='$.WEEK[1].NUMBER',
+WHAT CHAR(32) JPATH='$.WEEK[1].EXPENSE[*].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.WEEK[1].EXPENSE[*].AMOUNT')
+ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='expense.json';
+SELECT * FROM t3;
+
+CREATE TABLE t4 (
+WHO CHAR(12),
+WEEK INT(2) JPATH='$.WEEK[2].NUMBER',
+WHAT CHAR(32) JPATH='$.WEEK[2].EXPENSE[*].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.WEEK[2].EXPENSE[*].AMOUNT')
+ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='expense.json';
+SELECT * FROM t4;
+
+--echo #
+--echo # The expanded table is made as a TBL table
+--echo #
+CREATE TABLE t1 (
+WHO CHAR(12),
+WEEK INT(2),
+WHAT CHAR(32),
+AMOUNT DOUBLE(8,2))
+ENGINE=CONNECT TABLE_TYPE=TBL TABLE_LIST='t2,t3,t4';
+SELECT * FROM t1;
+DROP TABLE t1, t2, t3, t4;
+
+--echo #
+--echo # Three partial JSON tables
+--echo #
+CREATE TABLE t2 (
+WHO CHAR(12),
+WEEK INT(2),
+WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT')
+ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='mulexp3.json';
+SELECT * FROM t2;
+
+CREATE TABLE t3 (
+WHO CHAR(12),
+WEEK INT(2),
+WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT')
+ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='mulexp4.json';
+SELECT * FROM t3;
+
+CREATE TABLE t4 (
+WHO CHAR(12),
+WEEK INT(2),
+WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT')
+ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='mulexp5.json';
+SELECT * FROM t4;
+
+--echo #
+--echo # The complete table can be a multiple JSON table
+--echo #
+CREATE TABLE t1 (
+WHO CHAR(12),
+WEEK INT(2),
+WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT')
+ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='mulexp*.json' MULTIPLE=1;
+SELECT * FROM t1 ORDER BY WHO, WEEK, WHAT, AMOUNT;
+DROP TABLE t1;
+
+--echo #
+--echo # Or also a partition JSON table
+--echo #
+CREATE TABLE t1 (
+WHO CHAR(12),
+WEEK INT(2),
+WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT')
+ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='mulexp%s.json';
+ALTER TABLE t1
+PARTITION BY LIST COLUMNS(WEEK) (
+PARTITION `3` VALUES IN(3),
+PARTITION `4` VALUES IN(4),
+PARTITION `5` VALUES IN(5));
+SHOW WARNINGS;
+SELECT * FROM t1;
+SELECT * FROM t1 WHERE WEEK = 4;
+DROP TABLE t1, t2, t3, t4;
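+# With MULTIPLE=1 the 'mulexp*.json' pattern reads every matching file as
+# one table, while the partitioned variant substitutes each partition name
+# into 'mulexp%s.json', so partitions 3, 4 and 5 are expected to map to
+# mulexp3.json, mulexp4.json and mulexp5.json respectively.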
+
+#
+# Clean up
+#
+--remove_file $MYSQLD_DATADIR/test/biblio.json
+--remove_file $MYSQLD_DATADIR/test/bib0.dnx
+--remove_file $MYSQLD_DATADIR/test/bib0.json
+--remove_file $MYSQLD_DATADIR/test/expense.json
+--remove_file $MYSQLD_DATADIR/test/mulexp3.json
+--remove_file $MYSQLD_DATADIR/test/mulexp4.json
+--remove_file $MYSQLD_DATADIR/test/mulexp5.json
diff --git a/storage/connect/mysql-test/connect/t/bson_java_2.test b/storage/connect/mysql-test/connect/t/bson_java_2.test
new file mode 100644
index 00000000000..2188d9c2c91
--- /dev/null
+++ b/storage/connect/mysql-test/connect/t/bson_java_2.test
@@ -0,0 +1,14 @@
+-- source jdbconn.inc
+-- source mongo.inc
+
+--disable_query_log
+eval SET GLOBAL connect_class_path='$MTR_SUITE_DIR/std_data/Mongo2.jar';
+set connect_json_all_path=0;
+--enable_query_log
+let $DRV= Java;
+let $VERS= 2;
+let $TYPE= BSON;
+let $CONN= CONNECTION='mongodb://localhost:27017' LRECL=4096;
+
+-- source mongo_test.inc
+-- source jdbconn_cleanup.inc
diff --git a/storage/connect/mysql-test/connect/t/bson_java_3.test b/storage/connect/mysql-test/connect/t/bson_java_3.test
new file mode 100644
index 00000000000..e7dd90b3563
--- /dev/null
+++ b/storage/connect/mysql-test/connect/t/bson_java_3.test
@@ -0,0 +1,14 @@
+-- source jdbconn.inc
+-- source mongo.inc
+
+--disable_query_log
+eval SET GLOBAL connect_class_path='$MTR_SUITE_DIR/std_data/Mongo3.jar';
+set connect_json_all_path=0;
+--enable_query_log
+let $DRV= Java;
+let $VERS= 3;
+let $TYPE= BSON;
+let $CONN= CONNECTION='mongodb://localhost:27017' LRECL=4096;
+
+-- source mongo_test.inc
+-- source jdbconn_cleanup.inc
diff --git a/storage/connect/mysql-test/connect/t/bson_mongo_c.test b/storage/connect/mysql-test/connect/t/bson_mongo_c.test
new file mode 100644
index 00000000000..938d77c7c95
--- /dev/null
+++ b/storage/connect/mysql-test/connect/t/bson_mongo_c.test
@@ -0,0 +1,10 @@
+-- source mongo.inc
+
+let $DRV= C;
+let $VERS= 0;
+let $PROJ= {"projection":;
+let $ENDP= };
+let $TYPE= BSON;
+let $CONN= CONNECTION='mongodb://localhost:27017' LRECL=1024;
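+# $PROJ/$ENDP presumably let the shared mongo_test.inc wrap projection
+# documents in the form expected by the C driver; the Java variants of
+# this test do not define them.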
+
+-- source mongo_test.inc
diff --git a/storage/connect/mysql-test/connect/t/bson_udf.inc b/storage/connect/mysql-test/connect/t/bson_udf.inc
new file mode 100644
index 00000000000..c4722722ef7
--- /dev/null
+++ b/storage/connect/mysql-test/connect/t/bson_udf.inc
@@ -0,0 +1,72 @@
+--disable_query_log
+#
+# Check if server has support for loading plugins
+#
+if (`SELECT @@have_dynamic_loading != 'YES'`) {
+ --skip UDF requires dynamic loading
+}
+if (!$HA_CONNECT_SO) {
+ --skip Needs a dynamically built ha_connect.so
+}
+
+--eval CREATE FUNCTION bson_test RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bsonvalue RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bson_make_array RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bson_array_add_values RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bson_array_add RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bson_array_delete RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bson_make_object RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bson_object_nonull RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bson_object_key RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bson_object_add RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bson_object_delete RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bson_object_list RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bson_object_values RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bsonset_def_prec RETURNS INTEGER SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bsonget_def_prec RETURNS INTEGER SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bsonset_grp_size RETURNS INTEGER SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bsonget_grp_size RETURNS INTEGER SONAME '$HA_CONNECT_SO';
+--eval CREATE AGGREGATE FUNCTION bson_array_grp RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE AGGREGATE FUNCTION bson_object_grp RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bsonlocate RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bson_locate_all RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bson_contains RETURNS INTEGER SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bsoncontains_path RETURNS INTEGER SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bson_item_merge RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bson_get_item RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bson_delete_item RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bsonget_string RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bsonget_int RETURNS INTEGER SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bsonget_real RETURNS REAL SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bson_set_item RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bson_insert_item RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bson_update_item RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bson_file RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bson_serialize RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bfile_make RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bfile_convert RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bfile_bjson RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bbin_make_array RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bbin_array_add RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bbin_array_add_values RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bbin_array_delete RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE AGGREGATE FUNCTION bbin_array_grp RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE AGGREGATE FUNCTION bbin_object_grp RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bbin_make_object RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bbin_object_nonull RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bbin_object_key RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bbin_object_add RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bbin_object_delete RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bbin_object_list RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bbin_object_values RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bbin_get_item RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bbin_item_merge RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bbin_set_item RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bbin_insert_item RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bbin_update_item RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bbin_delete_item RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bbin_locate_all RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bbin_file RETURNS STRING SONAME '$HA_CONNECT_SO';
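+# Every UDF created here is dropped again by the companion bson_udf2.inc
+# sourced at the end of the test.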
+
+--enable_query_log
+
diff --git a/storage/connect/mysql-test/connect/t/bson_udf.test b/storage/connect/mysql-test/connect/t/bson_udf.test
new file mode 100644
index 00000000000..0da2de38864
--- /dev/null
+++ b/storage/connect/mysql-test/connect/t/bson_udf.test
@@ -0,0 +1,282 @@
+--source bson_udf.inc
+
+let $MYSQLD_DATADIR= `select @@datadir`;
+
+--copy_file $MTR_SUITE_DIR/std_data/biblio.json $MYSQLD_DATADIR/test/biblio.json
+--copy_file $MTR_SUITE_DIR/std_data/employee.dat $MYSQLD_DATADIR/test/employee.dat
+
+CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=VIR BLOCK_SIZE=5;
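+# t1 is a small virtual (VIR) table; its row counter column n (apparently
+# 1..5 with BLOCK_SIZE=5) is used below to evaluate the UDFs once per row.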
+
+--echo #
+--echo # Test UDF's with constant arguments
+--echo #
+--error ER_CANT_INITIALIZE_UDF
+SELECT BsonValue(56, 3.1416, 'foo', NULL);
+SELECT BsonValue(3.1416);
+SELECT BsonValue(-80);
+SELECT BsonValue('foo');
+SELECT BsonValue(9223372036854775807);
+SELECT BsonValue(NULL);
+SELECT BsonValue(TRUE);
+SELECT BsonValue(FALSE);
+SELECT BsonValue();
+SELECT BsonValue('[11, 22, 33]' json_) FROM t1;
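+# A string argument aliased json_ (as above) is meant to be parsed as a
+# JSON item rather than passed as plain text.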
+#
+SELECT Bson_Make_Array();
+SELECT Bson_Make_Array(56, 3.1416, 'My name is "Foo"', NULL);
+SELECT Bson_Make_Array(Bson_Make_Array(56, 3.1416, 'foo'), TRUE);
+#
+--error ER_CANT_INITIALIZE_UDF
+SELECT Bson_Array_Add(Bson_Make_Array(56, 3.1416, 'foo', NULL)) Array;
+SELECT Bson_Array_Add(Bson_Make_Array(56, 3.1416, 'foo', NULL), 'One more') Array;
+#--error ER_CANT_INITIALIZE_UDF
+SELECT Bson_Array_Add(BsonValue('one value'), 'One more');
+#--error ER_CANT_INITIALIZE_UDF
+SELECT Bson_Array_Add('one value', 'One more');
+SELECT Bson_Array_Add('one value' json_, 'One more');
+#--error ER_CANT_INITIALIZE_UDF
+SELECT Bson_Array_Add(5 json_, 'One more');
+SELECT Bson_Array_Add('[5,3,8,7,9]' json_, 4, 0);
+SELECT Bson_Array_Add('[5,3,8,7,9]' json_, 4, 2) Array;
+SELECT Bson_Array_Add('[5,3,8,7,9]' json_, 4, 9);
+SELECT Bson_Array_Add(Bson_Make_Array(1, 2, Bson_Make_Array(11, 22)), '[2]', 33, 1);
+SELECT Bson_Array_Add(Bson_Make_Array(1, 2, Bson_Make_Array(11, 22)), 33, '[2]', 1);
+SELECT Bson_Array_Add(Bson_Make_Array(1, 2, Bson_Make_Array(11, 22)), 33, 1, '[2]');
+#
+SELECT Bson_Array_Add_Values(Bson_Make_Array(56, 3.1416, 'machin', NULL), 'One more', 'Two more') Array;
+SELECT Bson_Array_Add_Values(Bson_Make_Array(56, 3.1416, 'machin'), 'One more', 'Two more') Array FROM t1;
+SELECT Bson_Array_Add_Values(Bson_Make_Array(56, 3.1416, 'machin'), n) Array FROM t1;
+SELECT Bson_Array_Add_Values(Bson_Make_Array(n, 3.1416, 'machin'), n) Array FROM t1;
+SELECT Bson_Array_Add_Values('[56]', 3.1416, 'machin') Array;
+#
+SELECT Bson_Array_Delete(Bson_Make_Array(56, 3.1416, 'My name is "Foo"', NULL), 0);
+SELECT Bson_Array_Delete(Bson_Make_Object(56, 3.1416, 'My name is Foo', NULL), 2);
+SELECT Bson_Array_Delete(Bson_Make_Array(56, 3.1416, 'My name is "Foo"', NULL), '2');
+SELECT Bson_Array_Delete(Bson_Make_Array(56, 3.1416, 'My name is "Foo"', NULL), '2', 2); /* WARNING VOID */
+#
+SELECT Bson_Make_Object(56, 3.1416, 'foo', NULL);
+SELECT Bson_Make_Object(56 qty, 3.1416 price, 'foo' truc, NULL garanty);
+SELECT Bson_Make_Object();
+SELECT Bson_Make_Object(Bson_Make_Array(56, 3.1416, 'foo'), NULL);
+SELECT Bson_Make_Array(Bson_Make_Object(56 "qty", 3.1416 "price", 'foo') ,NULL);
+SELECT Bson_Object_Key('qty', 56, 'price', 3.1416, 'truc', 'machin', 'garanty', NULL);
+--error ER_CANT_INITIALIZE_UDF
+SELECT Bson_Object_Key('qty', 56, 'price', 3.1416, 'truc', 'machin', 'garanty');
+#
+SELECT Bson_Object_Add(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty), 'blue' color);
+SELECT Bson_Object_Add(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty), 45.99 price);
+SELECT Bson_Object_Add(Bson_File('notexist.json'), 'cheese' item, '[1]', 1);
+#
+SELECT Bson_Object_Delete(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty), 'truc');
+SELECT Bson_Object_Delete(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty), 'chose');
+#
+SELECT Bson_Object_List(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty)) "Key List";
+SELECT Bson_Object_List('{"qty":56, "price":3.1416, "truc":"machin", "garanty":null}') "Key List";
+SELECT Bson_Object_Values('{"One":1,"Two":2,"Three":3}') "Value List";
+
+--echo #
+--echo # Test UDF's with column arguments
+--echo #
+SELECT Bsonset_Def_Prec(2);
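+# Bsonset_Def_Prec presumably sets the default number of decimals used
+# when the UDFs format double values such as SALARY below.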
+CREATE TABLE t2
+(
+ ISBN CHAR(15),
+ LANG CHAR(2),
+ SUBJECT CHAR(32),
+ AUTHOR CHAR(64),
+ TITLE CHAR(32),
+ TRANSLATION CHAR(32),
+ TRANSLATOR CHAR(80),
+ PUBLISHER CHAR(32),
+ DATEPUB int(4)
+) ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='biblio.json';
+
+SELECT Bson_Make_Array(AUTHOR, TITLE, DATEPUB) FROM t2;
+SELECT Bson_Make_Object(AUTHOR, TITLE, DATEPUB) FROM t2;
+--error ER_CANT_INITIALIZE_UDF
+SELECT Bson_Array_Grp(TITLE, DATEPUB) FROM t2;
+SELECT Bson_Array_Grp(TITLE) FROM t2;
+
+CREATE TABLE t3 (
+ SERIALNO CHAR(5) NOT NULL,
+ NAME VARCHAR(12) NOT NULL FLAG=6,
+ SEX SMALLINT(1) NOT NULL,
+ TITLE VARCHAR(15) NOT NULL FLAG=20,
+ MANAGER CHAR(5) DEFAULT NULL,
+ DEPARTMENT CHAr(4) NOT NULL FLAG=41,
+ SECRETARY CHAR(5) DEFAULT NULL FLAG=46,
+ SALARY DOUBLE(8,2) NOT NULL FLAG=52
+) ENGINE=CONNECT TABLE_TYPE=FIX BLOCK_SIZE=8 FILE_NAME='employee.dat' ENDING=1;
+
+SELECT Bson_Make_Object(SERIALNO, NAME, TITLE, SALARY) FROM t3 WHERE NAME = 'MERCHANT';
+SELECT DEPARTMENT, Bson_Array_Grp(NAME) FROM t3 GROUP BY DEPARTMENT;
+#SET connect_json_grp_size=30; Deprecated
+SELECT BsonSet_Grp_Size(30);
+SELECT Bson_Make_Object(title, Bson_Array_Grp(name) `json_names`) from t3 GROUP BY title;
+SELECT Bson_Make_Array(DEPARTMENT, Bson_Array_Grp(NAME)) FROM t3 GROUP BY DEPARTMENT;
+SELECT Bson_Make_Object(DEPARTMENT, Bson_Array_Grp(NAME) json_NAMES) FROM t3 GROUP BY DEPARTMENT;
+SELECT Bson_Make_Object(DEPARTMENT, Bson_Array_Grp(Bson_Make_Object(SERIALNO, NAME, TITLE, SALARY)) json_EMPLOYES) FROM t3 GROUP BY DEPARTMENT;
+SELECT Bson_Make_Object(DEPARTMENT, TITLE, Bson_Array_Grp(Bson_Make_Object(SERIALNO, NAME, SALARY)) json_EMPLOYES) FROM t3 GROUP BY DEPARTMENT, TITLE;
+--error ER_CANT_INITIALIZE_UDF
+SELECT Bson_Object_Grp(SALARY) FROM t3;
+SELECT Bson_Object_Grp(NAME, SALARY) FROM t3;
+SELECT Bson_Make_Object(DEPARTMENT, Bson_Object_Grp(NAME, SALARY) "Json_SALARIES") FROM t3 GROUP BY DEPARTMENT;
+SELECT Bson_Array_Grp(NAME) FROM t3;
+#
+SELECT Bson_Object_Key(name, title) FROM t3 WHERE DEPARTMENT = 318;
+SELECT Bson_Object_Grp(name, title) FROM t3 WHERE DEPARTMENT = 318;
+
+--echo #
+--echo # Test value getting UDF's
+--echo #
+SELECT BsonGet_String(Bson_Array_Grp(name),'[#]') FROM t3;
+SELECT BsonGet_String(Bson_Array_Grp(name),'[","]') FROM t3;
+SELECT BsonGet_String(Bson_Array_Grp(name),'[>]') FROM t3;
+SET @j1 = '[45,28,36,45,89]';
+SELECT BsonGet_String(@j1,'1');
+SELECT BsonGet_String(@j1 json_,'3');
+SELECT BsonGet_String(Bson_Make_Array(45,28,36,45,89),'3');
+SELECT BsonGet_String(Bson_Make_Array(45,28,36,45,89),'["+"]') "list",'=' as "egal",BsonGet_String(Bson_Make_Array(45,28,36,45,89),'[+]') "sum";
+SELECT BsonGet_String(Bson_Make_Array(Bson_Make_Array(45,28),Bson_Make_Array(36,45,89)),'1.0');
+SELECT BsonGet_String(Bson_Make_Array(Bson_Make_Array(45,28),Bson_Make_Array(36,45,89)),'1.*');
+SELECT BsonGet_String(Bson_Make_Object(56 qty,3.1416 price,'machin' truc, NULL garanty),'truc');
+SET @j2 = '{"qty":56,"price":3.141600,"truc":"machin","garanty":null}';
+SELECT BsonGet_String(@j2 json_,'truc');
+SELECT BsonGet_String(@j2,'truc');
+SELECT BsonGet_String(@j2,'chose');
+SELECT BsonGet_String(NULL json_, NULL); /* NULL WARNING */
+SELECT department, BsonGet_String(Bson_Make_Object(department, Bson_Array_Grp(salary) "Json_salaries"),'salaries.[+]') Sumsal FROM t3 GROUP BY department;
+#
+SELECT BsonGet_Int(@j1, '4');
+SELECT BsonGet_Int(@j1, '[#]');
+SELECT BsonGet_Int(@j1, '[+]');
+SELECT BsonGet_Int(@j1 json_, '3');
+SELECT BsonGet_Int(Bson_Make_Array(45,28,36,45,89), '3');
+SELECT BsonGet_Int(Bson_Make_Array(45,28,36,45,89), '["+"]');
+SELECT BsonGet_Int(Bson_Make_Array(45,28,36,45,89), '[+]');
+SELECT BsonGet_Int(Bson_Make_Array(Bson_Make_Array(45,28), Bson_Make_Array(36,45,89)), '1.0');
+SELECT BsonGet_Int(Bson_Make_Array(Bson_Make_Array(45,28), Bson_Make_Array(36,45,89)), '0.1');
+SELECT BsonGet_Int(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty), 'qty');
+SELECT BsonGet_Int(@j2 json_, 'price');
+SELECT BsonGet_Int(@j2, 'qty');
+SELECT BsonGet_Int('{"qty":56,"price":3.141600,"truc":"machin","garanty":null}', 'chose');
+SELECT BsonGet_Int(BsonGet_String(Bson_Make_Array(Bson_Make_Array(45,28),Bson_Make_Array(36,45,89)), '1.*'), '[+]') sum;
+SELECT department, BsonGet_Int(Bson_Make_Object(department, Bson_Array_Grp(salary) "Json_salaries"), 'salaries.[+]') Sumsal FROM t3 GROUP BY department;
+#
+SELECT BsonGet_Real(@j1, '2');
+SELECT BsonGet_Real(@j1 json_, '3', 2);
+SELECT BsonGet_Real(Bson_Make_Array(45,28,36,45,89), '3');
+SELECT BsonGet_Real(Bson_Make_Array(45,28,36,45,89), '["+"]');
+SELECT BsonGet_Real(Bson_Make_Array(45,28,36,45,89), '[+]');
+SELECT BsonGet_Real(Bson_Make_Array(45,28,36,45,89), '[!]');
+SELECT BsonGet_Real(Bson_Make_Array(Bson_Make_Array(45,28), Bson_Make_Array(36,45,89)), '1.0');
+SELECT BsonGet_Real(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty), 'price');
+SELECT BsonGet_Real('{"qty":56,"price":3.141600,"truc":"machin","garanty":null}' json_, 'qty');
+SELECT BsonGet_Real('{"qty":56,"price":3.141600,"truc":"machin","garanty":null}', 'price');
+SELECT BsonGet_Real('{"qty":56,"price":3.141600,"truc":"machin","garanty":null}', 'price', 4);
+SELECT BsonGet_Real('{"qty":56,"price":3.141600,"truc":"machin","garanty":null}', 'chose');
+SELECT department, BsonGet_Real(Bson_Make_Object(department, Bson_Array_Grp(salary) "Json_salaries"),'salaries.[+]') Sumsal FROM t3 GROUP BY department;
+
+--echo #
+--echo # Documentation examples
+--echo #
+SELECT
+ BsonGet_Int(Bson_Make_Array(45,28,36,45,89), '4') "Rank",
+ BsonGet_Int(Bson_Make_Array(45,28,36,45,89), '[#]') "Number",
+ BsonGet_String(Bson_Make_Array(45,28,36,45,89), '[","]') "Concat",
+ BsonGet_Int(Bson_Make_Array(45,28,36,45,89), '[+]') "Sum",
+ BsonGet_Real(Bson_Make_Array(45,28,36,45,89), '[!]', 2) "Avg";
+SELECT
+ BsonGet_String('{"qty":7,"price":29.50,"garanty":null}', 'price') "String",
+ BsonGet_Int('{"qty":7,"price":29.50,"garanty":null}', 'price') "Int",
+ BsonGet_Real('{"qty":7,"price":29.50,"garanty":null}', 'price') "Real";
+SELECT BsonGet_Real('{"qty":7,"price":29.50,"garanty":null}', 'price', 3) "Real";
+
+--echo #
+--echo # Testing Locate
+--echo #
+SELECT BsonLocate(Bson_Make_Object(56 qty,3.1416 price,'machin' truc, NULL garanty),'machin');
+SELECT BsonLocate(Bson_Make_Object(56 qty,3.1416 price,'machin' truc, NULL garanty),56);
+SELECT BsonLocate(Bson_Make_Object(56 qty,3.1416 price,'machin' truc, NULL garanty),3.1416);
+SELECT BsonLocate(Bson_Make_Object(56 qty,3.1416 price,'machin' truc, NULL garanty),'chose');
+SELECT BsonLocate('{"AUTHORS":[{"FN":"Jules", "LN":"Verne"}, {"FN":"Jack", "LN":"London"}]}' json_, 'Jack') Path;
+SELECT BsonLocate('{"AUTHORS":[{"FN":"Jules", "LN":"Verne"}, {"FN":"Jack", "LN":"London"}]}' json_, 'jack' ci) Path;
+SELECT BsonLocate('{"AUTHORS":[{"FN":"Jules", "LN":"Verne"}, {"FN":"Jack", "LN":"London"}]}' json_, '{"FN":"Jack", "LN":"London"}' json_) Path;
+SELECT BsonLocate('{"AUTHORS":[{"FN":"Jules", "LN":"Verne"}, {"FN":"Jack", "LN":"London"}]}' json_, '{"FN":"jack", "LN":"London"}' json_) Path;
+SELECT BsonLocate('[45,28,36,45,89]',36);
+SELECT BsonLocate('[45,28,36,45,89]' json_,28.0);
+SELECT Bson_Locate_All('[45,28,36,45,89]',10);
+SELECT Bson_Locate_All('[45,28,36,45,89]',45);
+SELECT Bson_Locate_All('[[45,28],36,45,89]',45);
+SELECT Bson_Locate_All('[[45,28,45],36,45,89]',45);
+SELECT Bson_Locate_All('[[45,28,45],36,45,89]',BsonGet_Int('[3,45]','[1]'));
+SELECT BsonLocate('[[45,28,45],36,45,89]',45,n) from t1;
+SELECT BsonGet_String(Bson_Locate_All('[[45,28,45],36,45,89]',45),concat('[',n-1,']')) FROM t1;
+SELECT BsonGet_String(Bson_Locate_All('[[45,28,45],36,45,89]',45),concat('[',n-1,']')) AS `Path` FROM t1 GROUP BY n HAVING `Path` IS NOT NULL;
+SELECT Bson_Locate_All('[45,28,[36,45,89]]',45);
+SELECT Bson_Locate_All('[[45,28],[36,45.0,89]]',BsonValue(45.0));
+SELECT Bson_Locate_All('[[45,28],[36,45.0,89]]',45.0);
+SELECT BsonLocate('[[45,28],[36,45,89]]','[36,45,89]' json_);
+SELECT BsonLocate('[[45,28],[36,45,89]]','[45,28]' json_);
+SELECT Bson_Locate_All('[[45,28],[[36,45],89]]','45') "All paths";
+SELECT Bson_Locate_All('[[45,28],[[36,45],89]]','[36,45]' json_);
+SELECT BsonGet_Int(Bson_Locate_All('[[45,28],[[36,45],89]]',45), '[#]') "Nb of occurs";
+SELECT Bson_Locate_All('[[45,28],[[36,45],89]]',45,2);
+SELECT BsonGet_String(Bson_Locate_All('[45,28,36,45,89]',45),'0');
+SELECT BsonLocate(Bson_File('test/biblio.json'), 'Knab');
+SELECT Bson_Locate_All('test/biblio.json' jfile_, 'Knab');
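+# The jfile_ alias marks the argument as a file name, so the JSON is read
+# from test/biblio.json rather than parsed from the literal string itself.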
+
+--echo #
+--echo # Testing json files
+--echo #
+SELECT Bfile_Make('[{"_id":5,"type":"food","item":"beer","taste":"light","price":5.65,"ratings":[5,8,9]},
+{"_id":6,"type":"car","item":"roadster","mileage":56000,"ratings":[6,9]},
+{"_id":7,"type":"food","item":"meat","origin":"argentina","ratings":[2,4]},
+{"_id":8,"type":"furniture","item":"table","size":{"W":60,"L":80,"H":40},"ratings":[5,8,7]}]', 'test/fx.json', 0) AS NewFile;
+SELECT Bfile_Make('test/fx.json', 1);
+SELECT Bfile_Make('test/fx.json' jfile_);
+SELECT Bfile_Make(Bbin_File('test/fx.json'), 0);
+SELECT Bson_File('test/fx.json', 1);
+SELECT Bson_File('test/fx.json', 2);
+SELECT Bson_File('test/fx.json', 0);
+SELECT Bson_File('test/fx.json', '0');
+SELECT Bson_File('test/fx.json', '[?]');
+SELECT BsonGet_String(Bson_File('test/fx.json'), '1.*');
+SELECT BsonGet_String(Bson_File('test/fx.json'), '1');
+SELECT BsonGet_Int(Bson_File('test/fx.json'), '1.mileage') AS Mileage;
+SELECT BsonGet_Real(Bson_File('test/fx.json'), '0.price', 2) AS Price;
+SELECT Bson_Array_Add(Bson_File('test/fx.json', '2'), 6, 'ratings');
+SELECT Bson_Array_Add(Bson_File('test/fx.json', '2'), 6, 1, 'ratings');
+SELECT Bson_Array_Add(Bson_File('test/fx.json', '2'), 6, 'ratings', 1);
+SELECT Bson_Array_Add(Bson_File('test/fx.json', '2.ratings'), 6, 0);
+SELECT Bson_Array_Delete(Bson_File('test/fx.json', '2'), 'ratings', 1);
+SELECT Bson_Object_Add(Bson_File('test/fx.json', '2'), 'france' origin);
+SELECT Bson_Object_Add(Bson_File('test/fx.json', '2'), 70 H, 'size');
+SELECT Bson_Object_Add(Bson_File('test/fx.json', '3'), 70 H, 'size');
+SELECT Bson_Object_List(Bson_File('test/fx.json', '3.size'));
+
+--echo #
+--echo # Testing new functions
+--echo #
+SELECT Bson_Item_Merge('["a","b","c"]','["d","e","f"]') as "Result";
+SELECT Bson_Item_Merge(Bson_Make_Array('a','b','c'), Bson_Make_Array('d','e','f')) as "Result";
+SELECT
+Bson_Set_Item('[1,2,3,{"quatre":4}]', 'foo', '$[1]', 5, '$[3].cinq') as "Set",
+Bson_Insert_Item('[1,2,3,{"quatre":4}]', 'foo', '$[1]', 5, '$[3].cinq') as "Insert",
+Bson_Update_Item(Bson_Make_Array(1,2,3,Bson_Object_Key('quatre',4)),'foo','$[1]',5,'$[3].cinq') "Update";
+SELECT bson_delete_item('[1,2,3,{"quatre":4,"Deux":2}]','1','[2].Deux');
+SELECT bson_delete_item('[1,2,3,{"quatre":4,"Deux":2}]','["[1]","[3].Deux"]');
+SELECT bson_delete_item('[1,2,3,{"quatre":4,"Deux":2}]','$.[3].Deux');
+DROP TABLE t1;
+DROP TABLE t2;
+DROP TABLE t3;
+SELECT BsonSet_Grp_Size(10);
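+# Presumably restores the group size to its default (assumed to be 10)
+# after the earlier BsonSet_Grp_Size(30) call.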
+
+#
+# Clean up
+#
+--source bson_udf2.inc
+--remove_file $MYSQLD_DATADIR/test/biblio.json
+--remove_file $MYSQLD_DATADIR/test/employee.dat
+--remove_file $MYSQLD_DATADIR/test/fx.json
+
diff --git a/storage/connect/mysql-test/connect/t/bson_udf2.inc b/storage/connect/mysql-test/connect/t/bson_udf2.inc
new file mode 100644
index 00000000000..d06d7fac435
--- /dev/null
+++ b/storage/connect/mysql-test/connect/t/bson_udf2.inc
@@ -0,0 +1,63 @@
+--disable_query_log
+
+DROP FUNCTION bson_test;
+DROP FUNCTION bsonvalue;
+DROP FUNCTION bson_make_array;
+DROP FUNCTION bson_array_add_values;
+DROP FUNCTION bson_array_add;
+DROP FUNCTION bson_array_delete;
+DROP FUNCTION bson_make_object;
+DROP FUNCTION bson_object_nonull;
+DROP FUNCTION bson_object_key;
+DROP FUNCTION bson_object_add;
+DROP FUNCTION bson_object_delete;
+DROP FUNCTION bson_object_list;
+DROP FUNCTION bson_object_values;
+DROP FUNCTION bsonset_def_prec;
+DROP FUNCTION bsonget_def_prec;
+DROP FUNCTION bsonset_grp_size;
+DROP FUNCTION bsonget_grp_size;
+DROP FUNCTION bson_array_grp;
+DROP FUNCTION bson_object_grp;
+DROP FUNCTION bsonlocate;
+DROP FUNCTION bson_locate_all;
+DROP FUNCTION bson_contains;
+DROP FUNCTION bsoncontains_path;
+DROP FUNCTION bson_item_merge;
+DROP FUNCTION bson_get_item;
+DROP FUNCTION bson_delete_item;
+DROP FUNCTION bsonget_string;
+DROP FUNCTION bsonget_int;
+DROP FUNCTION bsonget_real;
+DROP FUNCTION bson_set_item;
+DROP FUNCTION bson_insert_item;
+DROP FUNCTION bson_update_item;
+DROP FUNCTION bson_serialize;
+DROP FUNCTION bson_file;
+DROP FUNCTION bfile_make;
+DROP FUNCTION bfile_convert;
+DROP FUNCTION bfile_bjson;
+DROP FUNCTION bbin_make_array;
+DROP FUNCTION bbin_array_add;
+DROP FUNCTION bbin_array_add_values;
+DROP FUNCTION bbin_array_delete;
+DROP FUNCTION bbin_array_grp;
+DROP FUNCTION bbin_object_grp;
+DROP FUNCTION bbin_make_object;
+DROP FUNCTION bbin_object_nonull;
+DROP FUNCTION bbin_object_key;
+DROP FUNCTION bbin_object_add;
+DROP FUNCTION bbin_object_delete;
+DROP FUNCTION bbin_object_list;
+DROP FUNCTION bbin_object_values;
+DROP FUNCTION bbin_get_item;
+DROP FUNCTION bbin_set_item;
+DROP FUNCTION bbin_insert_item;
+DROP FUNCTION bbin_update_item;
+DROP FUNCTION bbin_item_merge;
+DROP FUNCTION bbin_delete_item;
+DROP FUNCTION bbin_locate_all;
+DROP FUNCTION bbin_file;
+
+--enable_query_log
+
diff --git a/storage/connect/mysql-test/connect/t/ini_grant.result b/storage/connect/mysql-test/connect/t/ini_grant.result
deleted file mode 100644
index 96d5e192c7d..00000000000
--- a/storage/connect/mysql-test/connect/t/ini_grant.result
+++ /dev/null
@@ -1,89 +0,0 @@
-#
-# Checking FILE privileges
-#
-set sql_mode="";
-GRANT ALL PRIVILEGES ON *.* TO user@localhost;
-REVOKE FILE ON *.* FROM user@localhost;
-set sql_mode=default;
-connect user,localhost,user,,;
-connection user;
-SELECT user();
-user()
-user@localhost
-CREATE TABLE t1 (sec CHAR(10) NOT NULL FLAG=1, val CHAR(10) NOT NULL) ENGINE=CONNECT TABLE_TYPE=INI;
-Warnings:
-Warning 1105 No file name. Table will use t1.ini
-INSERT INTO t1 VALUES ('sec1','val1');
-SELECT * FROM t1;
-sec val
-sec1 val1
-UPDATE t1 SET val='val11';
-SELECT * FROM t1;
-sec val
-sec1 val11
-DELETE FROM t1;
-SELECT * FROM t1;
-sec val
-INSERT INTO t1 VALUES('sec2','val2');
-TRUNCATE TABLE t1;
-SELECT * FROM t1;
-sec val
-CREATE VIEW v1 AS SELECT * FROM t1;
-SELECT * FROM v1;
-sec val
-DROP VIEW v1;
-DROP TABLE t1;
-CREATE TABLE t1 (sec CHAR(10) NOT NULL FLAG=1, val CHAR(10) NOT NULL) ENGINE=CONNECT TABLE_TYPE=INI FILE_NAME='t1.EXT';
-ERROR 42000: Access denied; you need (at least one of) the FILE privilege(s) for this operation
-connection default;
-SELECT user();
-user()
-root@localhost
-CREATE TABLE t1 (sec CHAR(10) NOT NULL FLAG=1, val CHAR(10) NOT NULL) ENGINE=CONNECT TABLE_TYPE=INI FILE_NAME='t1.EXT';
-INSERT INTO t1 VALUES ('sec1','val1');
-connection user;
-SELECT user();
-user()
-user@localhost
-INSERT INTO t1 VALUES ('sec2','val2');
-ERROR 42000: Access denied; you need (at least one of) the FILE privilege(s) for this operation
-SELECT * FROM t1;
-ERROR 42000: Access denied; you need (at least one of) the FILE privilege(s) for this operation
-UPDATE t1 SET val='val11';
-ERROR 42000: Access denied; you need (at least one of) the FILE privilege(s) for this operation
-DELETE FROM t1;
-ERROR 42000: Access denied; you need (at least one of) the FILE privilege(s) for this operation
-TRUNCATE TABLE t1;
-ERROR 42000: Access denied; you need (at least one of) the FILE privilege(s) for this operation
-ALTER TABLE t1 READONLY=1;
-ERROR 42000: Access denied; you need (at least one of) the FILE privilege(s) for this operation
-DROP TABLE t1;
-ERROR 42000: Access denied; you need (at least one of) the FILE privilege(s) for this operation
-CREATE VIEW v1 AS SELECT * FROM t1;
-ERROR 42000: Access denied; you need (at least one of) the FILE privilege(s) for this operation
-# Testing a VIEW created with FILE privileges but accessed with no FILE
-connection default;
-SELECT user();
-user()
-root@localhost
-CREATE SQL SECURITY INVOKER VIEW v1 AS SELECT * FROM t1;
-connection user;
-SELECT user();
-user()
-user@localhost
-SELECT * FROM v1;
-ERROR 42000: Access denied; you need (at least one of) the FILE privilege(s) for this operation
-INSERT INTO v1 VALUES ('sec3','val3');
-ERROR 42000: Access denied; you need (at least one of) the FILE privilege(s) for this operation
-UPDATE v1 SET val='val11';
-ERROR 42000: Access denied; you need (at least one of) the FILE privilege(s) for this operation
-DELETE FROM v1;
-ERROR 42000: Access denied; you need (at least one of) the FILE privilege(s) for this operation
-disconnect user;
-connection default;
-DROP VIEW v1;
-DROP TABLE t1;
-DROP USER user@localhost;
-#
-# Checking FILE privileges: done
-#
diff --git a/storage/connect/mysql-test/connect/t/jdbc_oracle.test b/storage/connect/mysql-test/connect/t/jdbc_oracle.test
index 10cb7a7b77d..1316352d4f5 100644
--- a/storage/connect/mysql-test/connect/t/jdbc_oracle.test
+++ b/storage/connect/mysql-test/connect/t/jdbc_oracle.test
@@ -8,20 +8,20 @@ CREATE TABLE t2 (
number int(5) not null flag=1,
message varchar(255) flag=2)
ENGINE=CONNECT TABLE_TYPE=JDBC CONNECTION='jdbc:oracle:thin:@localhost:1521:xe'
-OPTION_LIST='User=system,Password=manager,Execsrc=1';
+OPTION_LIST='User=system,Password=Choupy01,Execsrc=1';
SELECT * FROM t2 WHERE command = 'drop table employee';
SELECT * FROM t2 WHERE command = 'create table employee (id int not null, name varchar(32), title char(16), salary number(8,2))';
SELECT * FROM t2 WHERE command = "insert into employee values(4567,'Johnson', 'Engineer', 12560.50)";
CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC CATFUNC=tables
CONNECTION='jdbc:oracle:thin:@localhost:1521:xe'
-OPTION_LIST='User=system,Password=manager';
+OPTION_LIST='User=system,Password=Choupy01';
SELECT * FROM t1 WHERE table_name='employee';
DROP TABLE t1;
CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC TABNAME='EMPLOYEE' CATFUNC=columns
CONNECTION='jdbc:oracle:thin:@localhost:1521:xe'
-OPTION_LIST='User=system,Password=manager';
+OPTION_LIST='User=system,Password=Choupy01';
SELECT * FROM t1;
DROP TABLE t1;
@@ -32,7 +32,7 @@ CREATE SERVER 'oracle' FOREIGN DATA WRAPPER 'oracle.jdbc.driver.OracleDriver' OP
HOST 'jdbc:oracle:thin:@localhost:1521:xe',
DATABASE 'SYSTEM',
USER 'system',
-PASSWORD 'manager',
+PASSWORD 'Choupy01',
PORT 0,
SOCKET '',
OWNER 'SYSTEM');
diff --git a/storage/connect/mysql-test/connect/t/json.test b/storage/connect/mysql-test/connect/t/json.test
index 018489525f7..8b42ef9cfab 100644
--- a/storage/connect/mysql-test/connect/t/json.test
+++ b/storage/connect/mysql-test/connect/t/json.test
@@ -35,15 +35,15 @@ DROP TABLE t1;
CREATE TABLE t1
(
ISBN CHAR(15),
- Language CHAR(2) FIELD_FORMAT='$.LANG',
- Subject CHAR(32) FIELD_FORMAT='$.SUBJECT',
- Authors INT(2) FIELD_FORMAT='$.AUTHOR[#]',
- Title CHAR(32) FIELD_FORMAT='$.TITLE',
- Translation CHAR(32) FIELD_FORMAT='$.TRANSLATION',
- Translator CHAR(80) FIELD_FORMAT='$.TRANSLATOR',
- Publisher CHAR(20) FIELD_FORMAT='$.PUBLISHER.NAME',
- Location CHAR(16) FIELD_FORMAT='$.PUBLISHER.PLACE',
- Year int(4) FIELD_FORMAT='$.DATEPUB'
+ Language CHAR(2) JPATH='$.LANG',
+ Subject CHAR(32) JPATH='$.SUBJECT',
+ Authors INT(2) JPATH='$.AUTHOR[#]',
+ Title CHAR(32) JPATH='$.TITLE',
+ Translation CHAR(32) JPATH='$.TRANSLATION',
+ Translator CHAR(80) JPATH='$.TRANSLATOR',
+ Publisher CHAR(20) JPATH='$.PUBLISHER.NAME',
+ Location CHAR(16) JPATH='$.PUBLISHER.PLACE',
+ Year int(4) JPATH='$.DATEPUB'
)
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='biblio.json';
SELECT * FROM t1;
@@ -55,16 +55,16 @@ DROP TABLE t1;
CREATE TABLE t1
(
ISBN CHAR(15),
- Language CHAR(2) FIELD_FORMAT='$.LANG',
- Subject CHAR(32) FIELD_FORMAT='$.SUBJECT',
- AuthorFN CHAR(128) FIELD_FORMAT='$.AUTHOR[" and "].FIRSTNAME',
- AuthorLN CHAR(128) FIELD_FORMAT='$.AUTHOR[" and "].LASTNAME',
- Title CHAR(32) FIELD_FORMAT='$.TITLE',
- Translation CHAR(32) FIELD_FORMAT='$.TRANSLATION',
- Translator CHAR(80) FIELD_FORMAT='$.TRANSLATOR',
- Publisher CHAR(20) FIELD_FORMAT='$.PUBLISHER.NAME',
- Location CHAR(16) FIELD_FORMAT='$.PUBLISHER.PLACE',
- Year int(4) FIELD_FORMAT='$.DATEPUB'
+ Language CHAR(2) JPATH='$.LANG',
+ Subject CHAR(32) JPATH='$.SUBJECT',
+ AuthorFN CHAR(128) JPATH='$.AUTHOR[" and "].FIRSTNAME',
+ AuthorLN CHAR(128) JPATH='$.AUTHOR[" and "].LASTNAME',
+ Title CHAR(32) JPATH='$.TITLE',
+ Translation CHAR(32) JPATH='$.TRANSLATION',
+ Translator CHAR(80) JPATH='$.TRANSLATOR',
+ Publisher CHAR(20) JPATH='$.PUBLISHER.NAME',
+ Location CHAR(16) JPATH='$.PUBLISHER.PLACE',
+ Year int(4) JPATH='$.DATEPUB'
)
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='biblio.json';
SELECT * FROM t1;
@@ -76,16 +76,16 @@ DROP TABLE t1;
CREATE TABLE t1
(
ISBN CHAR(15),
- Language CHAR(2) FIELD_FORMAT='$.LANG',
- Subject CHAR(32) FIELD_FORMAT='$.SUBJECT',
- AuthorFN CHAR(128) FIELD_FORMAT='$.AUTHOR[*].FIRSTNAME',
- AuthorLN CHAR(128) FIELD_FORMAT='$.AUTHOR[*].LASTNAME',
- Title CHAR(32) FIELD_FORMAT='$.TITLE',
- Translation CHAR(32) FIELD_FORMAT='$.TRANSLATION',
- Translator CHAR(80) FIELD_FORMAT='$.TRANSLATOR',
- Publisher CHAR(20) FIELD_FORMAT='$.PUBLISHER.NAME',
- Location CHAR(16) FIELD_FORMAT='$.PUBLISHER.PLACE',
- Year int(4) FIELD_FORMAT='$.DATEPUB'
+ Language CHAR(2) JPATH='$.LANG',
+ Subject CHAR(32) JPATH='$.SUBJECT',
+ AuthorFN CHAR(128) JPATH='$.AUTHOR[*].FIRSTNAME',
+ AuthorLN CHAR(128) JPATH='$.AUTHOR[*].LASTNAME',
+ Title CHAR(32) JPATH='$.TITLE',
+ Translation CHAR(32) JPATH='$.TRANSLATION',
+ Translator CHAR(80) JPATH='$.TRANSLATOR',
+ Publisher CHAR(20) JPATH='$.PUBLISHER.NAME',
+ Location CHAR(16) JPATH='$.PUBLISHER.PLACE',
+ Year int(4) JPATH='$.DATEPUB'
)
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='biblio.json';
SELECT * FROM t1;
@@ -122,17 +122,17 @@ DROP TABLE t1;
CREATE TABLE t1
(
ISBN CHAR(15) NOT NULL,
- Language CHAR(2) FIELD_FORMAT='$.LANG',
- Subject CHAR(32) FIELD_FORMAT='$.SUBJECT',
- AuthorFN CHAR(128) FIELD_FORMAT='$.AUTHOR[*].FIRSTNAME',
- AuthorLN CHAR(128) FIELD_FORMAT='$.AUTHOR[*].LASTNAME',
- Title CHAR(32) FIELD_FORMAT='$.TITLE',
- Translation CHAR(32) FIELD_FORMAT='$.TRANSLATED.PREFIX',
- TranslatorFN CHAR(80) FIELD_FORMAT='$.TRANSLATED.TRANSLATOR.FIRSTNAME',
- TranslatorLN CHAR(80) FIELD_FORMAT='$.TRANSLATED.TRANSLATOR.LASTNAME',
- Publisher CHAR(20) FIELD_FORMAT='$.PUBLISHER.NAME',
- Location CHAR(16) FIELD_FORMAT='$.PUBLISHER.PLACE',
- Year int(4) FIELD_FORMAT='$.DATEPUB',
+ Language CHAR(2) JPATH='$.LANG',
+ Subject CHAR(32) JPATH='$.SUBJECT',
+ AuthorFN CHAR(128) JPATH='$.AUTHOR[*].FIRSTNAME',
+ AuthorLN CHAR(128) JPATH='$.AUTHOR[*].LASTNAME',
+ Title CHAR(32) JPATH='$.TITLE',
+ Translation CHAR(32) JPATH='$.TRANSLATED.PREFIX',
+ TranslatorFN CHAR(80) JPATH='$.TRANSLATED.TRANSLATOR.FIRSTNAME',
+ TranslatorLN CHAR(80) JPATH='$.TRANSLATED.TRANSLATOR.LASTNAME',
+ Publisher CHAR(20) JPATH='$.PUBLISHER.NAME',
+ Location CHAR(16) JPATH='$.PUBLISHER.PLACE',
+ Year int(4) JPATH='$.DATEPUB',
INDEX IX(ISBN)
)
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='bib0.json' LRECL=320 OPTION_LIST='Pretty=0';
@@ -148,9 +148,9 @@ DROP TABLE t1;
--echo #
CREATE TABLE t1 (
WHO CHAR(12),
-WEEK INT(2) FIELD_FORMAT='$.WEEK[*].NUMBER',
-WHAT CHAR(32) FIELD_FORMAT='$.WEEK[].EXPENSE["+"].WHAT',
-AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.WEEK[].EXPENSE[+].AMOUNT')
+WEEK INT(2) JPATH='$.WEEK[*].NUMBER',
+WHAT CHAR(32) JPATH='$.WEEK[].EXPENSE["+"].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.WEEK[].EXPENSE[+].AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.json';
SELECT * FROM t1;
DROP TABLE t1;
@@ -160,9 +160,9 @@ DROP TABLE t1;
--echo #
CREATE TABLE t1 (
WHO CHAR(12),
-WEEK INT(2) FIELD_FORMAT='$.WEEK[*].NUMBER',
-WHAT CHAR(32) FIELD_FORMAT='$.WEEK[*].EXPENSE[*].WHAT',
-AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.WEEK[*].EXPENSE[*].AMOUNT')
+WEEK INT(2) JPATH='$.WEEK[*].NUMBER',
+WHAT CHAR(32) JPATH='$.WEEK[*].EXPENSE[*].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.WEEK[*].EXPENSE[*].AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.json';
#--error ER_GET_ERRMSG
SELECT * FROM t1;
@@ -173,14 +173,14 @@ DROP TABLE t1;
--echo #
CREATE TABLE t1 (
WHO CHAR(12) NOT NULL,
-WEEKS CHAR(12) NOT NULL FIELD_FORMAT='$.WEEK[", "].NUMBER',
-SUMS CHAR(64) NOT NULL FIELD_FORMAT='$.WEEK["+"].EXPENSE[+].AMOUNT',
-SUM DOUBLE(8,2) NOT NULL FIELD_FORMAT='$.WEEK[+].EXPENSE[+].AMOUNT',
-AVGS CHAR(64) NOT NULL FIELD_FORMAT='$.WEEK["+"].EXPENSE[!].AMOUNT',
-SUMAVG DOUBLE(8,2) NOT NULL FIELD_FORMAT='$.WEEK[+].EXPENSE[!].AMOUNT',
-AVGSUM DOUBLE(8,2) NOT NULL FIELD_FORMAT='$.WEEK[!].EXPENSE[+].AMOUNT',
-AVGAVG DOUBLE(8,2) NOT NULL FIELD_FORMAT='$.WEEK[!].EXPENSE[!].AMOUNT',
-AVERAGE DOUBLE(8,2) NOT NULL FIELD_FORMAT='$.WEEK[!].EXPENSE[*].AMOUNT')
+WEEKS CHAR(12) NOT NULL JPATH='$.WEEK[", "].NUMBER',
+SUMS CHAR(64) NOT NULL JPATH='$.WEEK["+"].EXPENSE[+].AMOUNT',
+SUM DOUBLE(8,2) NOT NULL JPATH='$.WEEK[+].EXPENSE[+].AMOUNT',
+AVGS CHAR(64) NOT NULL JPATH='$.WEEK["+"].EXPENSE[!].AMOUNT',
+SUMAVG DOUBLE(8,2) NOT NULL JPATH='$.WEEK[+].EXPENSE[!].AMOUNT',
+AVGSUM DOUBLE(8,2) NOT NULL JPATH='$.WEEK[!].EXPENSE[+].AMOUNT',
+AVGAVG DOUBLE(8,2) NOT NULL JPATH='$.WEEK[!].EXPENSE[!].AMOUNT',
+AVERAGE DOUBLE(8,2) NOT NULL JPATH='$.WEEK[!].EXPENSE[*].AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.json';
SELECT * FROM t1;
DROP TABLE t1;
@@ -190,25 +190,25 @@ DROP TABLE t1;
--echo #
CREATE TABLE t2 (
WHO CHAR(12),
-WEEK INT(2) FIELD_FORMAT='$.WEEK[0].NUMBER',
-WHAT CHAR(32) FIELD_FORMAT='$.WEEK[0].EXPENSE[*].WHAT',
-AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.WEEK[0].EXPENSE[*].AMOUNT')
+WEEK INT(2) JPATH='$.WEEK[0].NUMBER',
+WHAT CHAR(32) JPATH='$.WEEK[0].EXPENSE[*].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.WEEK[0].EXPENSE[*].AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.json';
SELECT * FROM t2;
CREATE TABLE t3 (
WHO CHAR(12),
-WEEK INT(2) FIELD_FORMAT='$.WEEK[1].NUMBER',
-WHAT CHAR(32) FIELD_FORMAT='$.WEEK[1].EXPENSE[*].WHAT',
-AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.WEEK[1].EXPENSE[*].AMOUNT')
+WEEK INT(2) JPATH='$.WEEK[1].NUMBER',
+WHAT CHAR(32) JPATH='$.WEEK[1].EXPENSE[*].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.WEEK[1].EXPENSE[*].AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.json';
SELECT * FROM t3;
CREATE TABLE t4 (
WHO CHAR(12),
-WEEK INT(2) FIELD_FORMAT='$.WEEK[2].NUMBER',
-WHAT CHAR(32) FIELD_FORMAT='$.WEEK[2].EXPENSE[*].WHAT',
-AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.WEEK[2].EXPENSE[*].AMOUNT')
+WEEK INT(2) JPATH='$.WEEK[2].NUMBER',
+WHAT CHAR(32) JPATH='$.WEEK[2].EXPENSE[*].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.WEEK[2].EXPENSE[*].AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.json';
SELECT * FROM t4;
@@ -230,24 +230,24 @@ DROP TABLE t1, t2, t3, t4;
CREATE TABLE t2 (
WHO CHAR(12),
WEEK INT(2),
-WHAT CHAR(32) FIELD_FORMAT='$.EXPENSE[*].WHAT',
-AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.EXPENSE.[*].AMOUNT')
+WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='mulexp3.json';
SELECT * FROM t2;
CREATE TABLE t3 (
WHO CHAR(12),
WEEK INT(2),
-WHAT CHAR(32) FIELD_FORMAT='$.EXPENSE[*].WHAT',
-AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.EXPENSE.[*].AMOUNT')
+WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='mulexp4.json';
SELECT * FROM t3;
CREATE TABLE t4 (
WHO CHAR(12),
WEEK INT(2),
-WHAT CHAR(32) FIELD_FORMAT='$.EXPENSE[*].WHAT',
-AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.EXPENSE.[*].AMOUNT')
+WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='mulexp5.json';
SELECT * FROM t4;
@@ -257,8 +257,8 @@ SELECT * FROM t4;
CREATE TABLE t1 (
WHO CHAR(12),
WEEK INT(2),
-WHAT CHAR(32) FIELD_FORMAT='$.EXPENSE[*].WHAT',
-AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.EXPENSE.[*].AMOUNT')
+WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='mulexp*.json' MULTIPLE=1;
SELECT * FROM t1 ORDER BY WHO, WEEK, WHAT, AMOUNT;
DROP TABLE t1;
@@ -269,8 +269,8 @@ DROP TABLE t1;
CREATE TABLE t1 (
WHO CHAR(12),
WEEK INT(2),
-WHAT CHAR(32) FIELD_FORMAT='$.EXPENSE[*].WHAT',
-AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.EXPENSE.[*].AMOUNT')
+WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='mulexp%s.json';
ALTER TABLE t1
PARTITION BY LIST COLUMNS(WEEK) (
diff --git a/storage/connect/mysql-test/connect/t/json_java_2.test b/storage/connect/mysql-test/connect/t/json_java_2.test
index 2f64d8e2eed..03202828bb1 100644
--- a/storage/connect/mysql-test/connect/t/json_java_2.test
+++ b/storage/connect/mysql-test/connect/t/json_java_2.test
@@ -3,6 +3,7 @@
--disable_query_log
eval SET GLOBAL connect_class_path='$MTR_SUITE_DIR/std_data/Mongo2.jar';
+set connect_json_all_path=0;
--enable_query_log
let $DRV= Java;
let $VERS= 2;
diff --git a/storage/connect/mysql-test/connect/t/json_java_3.test b/storage/connect/mysql-test/connect/t/json_java_3.test
index cee8343772a..238808a833f 100644
--- a/storage/connect/mysql-test/connect/t/json_java_3.test
+++ b/storage/connect/mysql-test/connect/t/json_java_3.test
@@ -3,6 +3,7 @@
--disable_query_log
eval SET GLOBAL connect_class_path='$MTR_SUITE_DIR/std_data/Mongo3.jar';
+set connect_json_all_path=0;
--enable_query_log
let $DRV= Java;
let $VERS= 3;
diff --git a/storage/connect/mysql-test/connect/t/mongo_test.inc b/storage/connect/mysql-test/connect/t/mongo_test.inc
index 357fa55240b..6e7c78e81ac 100644
--- a/storage/connect/mysql-test/connect/t/mongo_test.inc
+++ b/storage/connect/mysql-test/connect/t/mongo_test.inc
@@ -1,9 +1,10 @@
set connect_enable_mongo=1;
+set connect_json_all_path=0;
--echo #
--echo # Test the MONGO table type
--echo #
-eval CREATE TABLE t1 (Document varchar(1024) field_format='*')
+eval CREATE TABLE t1 (Document varchar(1024) JPATH='*')
ENGINE=CONNECT TABLE_TYPE=$TYPE TABNAME=restaurants $CONN
OPTION_LIST='Driver=$DRV,Version=$VERS' DATA_CHARSET=utf8;
SELECT * from t1 limit 3;
@@ -13,7 +14,7 @@ DROP TABLE t1;
--echo # Test catfunc
--echo #
eval CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=$TYPE TABNAME=restaurants CATFUNC=columns
-OPTION_LIST='Level=1,Driver=$DRV,Version=$VERS' DATA_CHARSET=utf8 $CONN;
+OPTION_LIST='Depth=1,Driver=$DRV,Version=$VERS' DATA_CHARSET=utf8 $CONN;
SELECT * from t1;
DROP TABLE t1;
@@ -36,7 +37,7 @@ DROP TABLE t1;
--echo # Test discovery
--echo #
eval CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=$TYPE TABNAME=restaurants
-OPTION_LIST='Level=1,Driver=$DRV,Version=$VERS' $CONN DATA_CHARSET=utf8;
+OPTION_LIST='Depth=1,Driver=$DRV,Version=$VERS' $CONN DATA_CHARSET=utf8;
SHOW CREATE TABLE t1;
SELECT * FROM t1 LIMIT 5;
DROP TABLE t1;
@@ -58,12 +59,12 @@ _id VARCHAR(24) NOT NULL,
name VARCHAR(64) NOT NULL,
cuisine CHAR(200) NOT NULL,
borough CHAR(16) NOT NULL,
-street VARCHAR(65) FIELD_FORMAT='address.street',
-building CHAR(16) FIELD_FORMAT='address.building',
-zipcode CHAR(5) FIELD_FORMAT='address.zipcode',
-grade CHAR(1) FIELD_FORMAT='grades.0.grade',
-score INT(4) NOT NULL FIELD_FORMAT='grades.0.score',
-`date` DATE FIELD_FORMAT='grades.0.date',
+street VARCHAR(65) JPATH='address.street',
+building CHAR(16) JPATH='address.building',
+zipcode CHAR(5) JPATH='address.zipcode',
+grade CHAR(1) JPATH='grades.0.grade',
+score INT(4) NOT NULL JPATH='grades.0.score',
+`date` DATE JPATH='grades.0.date',
restaurant_id VARCHAR(255) NOT NULL)
ENGINE=CONNECT TABLE_TYPE=$TYPE TABNAME='restaurants' DATA_CHARSET=utf8
OPTION_LIST='Driver=$DRV,Version=$VERS' $CONN;
@@ -125,6 +126,10 @@ IF ($TYPE == JSON)
{
SELECT name, borough, address_street, grades_score AS score FROM t1 WHERE grades_grade = 'B';
}
+IF ($TYPE == BSON)
+{
+SELECT name, borough, address_street, grades_score AS score FROM t1 WHERE grades_grade = 'B';
+}
DROP TABLE t1;
--echo #
@@ -156,8 +161,8 @@ DROP TABLE t1;
eval CREATE TABLE t1 (
_id char(5) NOT NULL,
city char(16) NOT NULL,
- loc_0 double(12,6) NOT NULL `FIELD_FORMAT`='loc.0',
- loc_1 char(12) NOT NULL `FIELD_FORMAT`='loc.1',
+ loc_0 double(12,6) NOT NULL `JPATH`='loc.0',
+ loc_1 char(12) NOT NULL `JPATH`='loc.1',
pop int(11) NOT NULL,
state char(2) NOT NULL)
ENGINE=CONNECT CONNECTION='mongodb://localhost:27017' TABLE_TYPE=$TYPE TABNAME='cities'
@@ -181,11 +186,11 @@ DROP TABLE t1;
eval CREATE TABLE t1 (
_id int(4) NOT NULL,
item CHAR(8) NOT NULL,
- prices_0 INT(6) FIELD_FORMAT='prices.0',
- prices_1 INT(6) FIELD_FORMAT='prices.1',
- prices_2 INT(6) FIELD_FORMAT='prices.2',
- prices_3 INT(6) FIELD_FORMAT='prices.3',
- prices_4 INT(6) FIELD_FORMAT='prices.4')
+ prices_0 INT(6) JPATH='prices.0',
+ prices_1 INT(6) JPATH='prices.1',
+ prices_2 INT(6) JPATH='prices.2',
+ prices_3 INT(6) JPATH='prices.3',
+ prices_4 INT(6) JPATH='prices.4')
ENGINE=CONNECT TABLE_TYPE=$TYPE TABNAME='testcoll' DATA_CHARSET=utf8
OPTION_LIST='Driver=$DRV,Version=$VERS' $CONN;
INSERT INTO t1 VALUES
diff --git a/storage/connect/mysql-test/connect/t/odbc_oracle.test b/storage/connect/mysql-test/connect/t/odbc_oracle.test
index 9de742a2647..18d29f69f1a 100644
--- a/storage/connect/mysql-test/connect/t/odbc_oracle.test
+++ b/storage/connect/mysql-test/connect/t/odbc_oracle.test
@@ -78,42 +78,42 @@ SET NAMES utf8;
--echo # All tables in all schemas (filtered with WHERE)
CREATE TABLE t1 ENGINE=CONNECT
-TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr'
+TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr'
CATFUNC=Tables;
SELECT * FROM t1 WHERE Table_Schema='MTR' ORDER BY Table_Schema, Table_Name;
DROP TABLE t1;
--echo # All tables in all schemas (filtered with WHERE)
CREATE TABLE t1 ENGINE=CONNECT
-TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr'
+TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr'
CATFUNC=Tables TABNAME='%.%';
SELECT * FROM t1 WHERE Table_Schema='MTR' ORDER BY Table_Schema, Table_Name;
DROP TABLE t1;
--echo # All tables "T1" in all schemas (filtered with WHERE)
CREATE TABLE t1 ENGINE=CONNECT
-TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr'
+TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr'
CATFUNC=Tables TABNAME='%.T1';
SELECT * FROM t1 WHERE Table_Schema='MTR' ORDER BY Table_Schema, Table_Name;
DROP TABLE t1;
--echo # All tables "T1" in all schemas (filtered with WHERE)
CREATE TABLE t1 ENGINE=CONNECT
-TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr'
+TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr'
CATFUNC=Tables TABNAME='T1';
SELECT * FROM t1 WHERE Table_Schema='MTR' ORDER BY Table_Schema, Table_Name;
DROP TABLE t1;
--echo # Table "T1" in the schema "MTR"
CREATE TABLE t1 ENGINE=CONNECT
-TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr'
+TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr'
CATFUNC=Tables TABNAME='MTR.T1';
SELECT * FROM t1 ORDER BY Table_Schema, Table_Name;
DROP TABLE t1;
--echo # All tables in the schema "MTR"
CREATE TABLE t1 ENGINE=CONNECT
-TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr'
+TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr'
CATFUNC=Tables TABNAME='MTR.%';
SELECT * FROM t1 ORDER BY Table_Schema, Table_Name;
DROP TABLE t1;
@@ -127,7 +127,7 @@ DROP TABLE t1;
--echo # All columns in all schemas (limited with WHERE)
CREATE TABLE t1 ENGINE=CONNECT
-TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr'
+TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr'
CATFUNC=Columns;
# Disable warnings to avoid "Result limited to 20000 lines"
--disable_warnings
@@ -137,7 +137,7 @@ DROP TABLE t1;
--echo # All columns in all schemas (limited with WHERE)
CREATE TABLE t1 ENGINE=CONNECT
-TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr'
+TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr'
CATFUNC=Columns TABNAME='%.%';
# Disable warnings to avoid "Result limited to 20000 lines"
--disable_warnings
@@ -146,20 +146,20 @@ SELECT * FROM t1 WHERE Table_Schema='MTR' ORDER BY Table_Schema, Table_Name;
DROP TABLE t1;
--echo # All tables "T1" in all schemas (limited with WHERE)
-CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' CATFUNC=Columns TABNAME='%.T1';
+CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' CATFUNC=Columns TABNAME='%.T1';
SELECT * FROM t1 WHERE Table_Schema='MTR' ORDER BY Table_Schema, Table_Name;
DROP TABLE t1;
--echo # Table "T1" in the schema "MTR"
CREATE TABLE t1 ENGINE=CONNECT
-TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr'
+TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr'
CATFUNC=Columns TABNAME='MTR.T1';
SELECT * FROM t1 ORDER BY Table_Schema, Table_Name;
DROP TABLE t1;
--echo # All tables "T1" in all schemas (filtered with WHERE)
CREATE TABLE t1 ENGINE=CONNECT
-TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr'
+TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr'
CATFUNC=Columns TABNAME='%.T1';
SELECT * FROM t1 WHERE Table_Schema='MTR' ORDER BY Table_Schema, Table_Name;
DROP TABLE t1;
@@ -172,7 +172,7 @@ DROP TABLE t1;
--echo # Table "T1" in the default schema ("MTR")
CREATE TABLE t1 ENGINE=CONNECT
-TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr'
+TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr'
TABNAME='T1';
SHOW CREATE TABLE t1;
SELECT * FROM t1 ORDER BY A;
@@ -189,7 +189,7 @@ DROP TABLE t1;
--echo # Table "T1" in the schema "MTR"
CREATE TABLE t1 ENGINE=CONNECT
-TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr'
+TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr'
TABNAME='MTR.T1';
SHOW CREATE TABLE t1;
SELECT * FROM t1;
@@ -197,7 +197,7 @@ DROP TABLE t1;
--echo # View "V1" in the schema "MTR"
CREATE TABLE t1 ENGINE=CONNECT
-TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr'
+TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr'
TABNAME='MTR.V1';
SHOW CREATE TABLE t1;
SELECT * FROM t1;
@@ -214,7 +214,7 @@ DROP TABLE t1;
--echo # Table "T2" in the schema "MTR"
CREATE TABLE t1 ENGINE=CONNECT
-TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr'
+TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr'
TABNAME='MTR.T2';
SHOW CREATE TABLE t1;
SELECT * FROM t1;
diff --git a/storage/connect/mysql-test/connect/t/rest.inc b/storage/connect/mysql-test/connect/t/rest.inc
new file mode 100644
index 00000000000..6848e4b6965
--- /dev/null
+++ b/storage/connect/mysql-test/connect/t/rest.inc
@@ -0,0 +1,17 @@
+--disable_query_log
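+# Probe for REST support: try to create a JSON table from an HTTP source
+# and skip the test when the table does not materialize, i.e. when the
+# server was built without curl or Casablanca.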
+--error 0,ER_UNKNOWN_ERROR
+CREATE TABLE t1
+ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='users.json'
+HTTP='http://jsonplaceholder.typicode.com/users';
+
+if (!`SELECT count(*) FROM INFORMATION_SCHEMA.TABLES
+ WHERE TABLE_SCHEMA='test' AND TABLE_NAME='t1'
+ AND ENGINE='CONNECT'
+ AND CREATE_OPTIONS LIKE "%`table_type`='JSON'%"`)
+{
+ DROP TABLE IF EXISTS t1;
+ Skip Need Curl or Casablanca;
+}
+DROP TABLE t1;
+--enable_query_log
+
diff --git a/storage/connect/mysql-test/connect/t/rest.test b/storage/connect/mysql-test/connect/t/rest.test
new file mode 100644
index 00000000000..67066ed4639
--- /dev/null
+++ b/storage/connect/mysql-test/connect/t/rest.test
@@ -0,0 +1,17 @@
+--source rest.inc
+
+let $MYSQLD_DATADIR= `select @@datadir`;
+
+--echo #
+--echo # Testing REST query
+--echo #
+CREATE TABLE t1
+ENGINE=CONNECT DATA_CHARSET=utf8 TABLE_TYPE=JSON FILE_NAME='users.json'
+HTTP='http://jsonplaceholder.typicode.com/users';
+SELECT * FROM t1;
+DROP TABLE t1;
+
+#
+# Clean up
+#
+--remove_file $MYSQLD_DATADIR/test/users.json
diff --git a/storage/connect/mysql-test/connect/t/xml.test b/storage/connect/mysql-test/connect/t/xml.test
index 0fdf8e90b6e..e837ec79604 100644
--- a/storage/connect/mysql-test/connect/t/xml.test
+++ b/storage/connect/mysql-test/connect/t/xml.test
@@ -300,6 +300,7 @@ CREATE TABLE t1 (node VARCHAR(50))
ENGINE=connect TABLE_TYPE=xml FILE_NAME='t1.xml'
OPTION_LIST='xmlsup=domdoc,rownode=line,encoding=iso-8859-1';
INSERT INTO t1 VALUES (_latin1 0xC0C1C2C3);
+--replace_regex /.*iso-8859-1.*/warning about characters outside of iso-8859-1/
INSERT INTO t1 VALUES (_cp1251 0xC0C1C2C3);
INSERT INTO t1 VALUES ('&<>"\'');
SELECT node, hex(node) FROM t1;
diff --git a/storage/connect/mysql-test/connect/t/xml2.test b/storage/connect/mysql-test/connect/t/xml2.test
index 7bbc3dbd87c..9c5f685d399 100644
--- a/storage/connect/mysql-test/connect/t/xml2.test
+++ b/storage/connect/mysql-test/connect/t/xml2.test
@@ -77,9 +77,9 @@ DROP TABLE t1;
--echo # Testing mixed tag and attribute values
--echo #
CREATE TABLE t1 (
- ISBN CHAR(15) FIELD_FORMAT='@',
- LANG CHAR(2) FIELD_FORMAT='@',
- SUBJECT CHAR(32) FIELD_FORMAT='@',
+ ISBN CHAR(15) XPATH='@',
+ LANG CHAR(2) XPATH='@',
+ SUBJECT CHAR(32) XPATH='@',
AUTHOR CHAR(50),
TITLE CHAR(32),
TRANSLATOR CHAR(40),
@@ -98,9 +98,9 @@ DROP TABLE t1;
--copy_file $MTR_SUITE_DIR/std_data/xsample.xml $MYSQLD_DATADIR/test/xsample2.xml
--chmod 0644 $MYSQLD_DATADIR/test/xsample2.xml
CREATE TABLE t1 (
- ISBN CHAR(15) FIELD_FORMAT='@',
- LANG CHAR(2) FIELD_FORMAT='@',
- SUBJECT CHAR(32) FIELD_FORMAT='@',
+ ISBN CHAR(15) XPATH='@',
+ LANG CHAR(2) XPATH='@',
+ SUBJECT CHAR(32) XPATH='@',
AUTHOR CHAR(50),
TITLE CHAR(32),
TRANSLATOR CHAR(40),
@@ -123,18 +123,18 @@ DROP TABLE t1;
--echo # Testing XPath
--echo #
CREATE TABLE t1 (
- isbn CHAR(15) FIELD_FORMAT='@ISBN',
- language CHAR(2) FIELD_FORMAT='@LANG',
- subject CHAR(32) FIELD_FORMAT='@SUBJECT',
- authorfn CHAR(20) FIELD_FORMAT='AUTHOR/FIRSTNAME',
- authorln CHAR(20) FIELD_FORMAT='AUTHOR/LASTNAME',
- title CHAR(32) FIELD_FORMAT='TITLE',
- translated CHAR(32) FIELD_FORMAT='TRANSLATOR/@PREFIX',
- tranfn CHAR(20) FIELD_FORMAT='TRANSLATOR/FIRSTNAME',
- tranln CHAR(20) FIELD_FORMAT='TRANSLATOR/LASTNAME',
- publisher CHAR(20) FIELD_FORMAT='PUBLISHER/NAME',
- location CHAR(20) FIELD_FORMAT='PUBLISHER/PLACE',
- year INT(4) FIELD_FORMAT='DATEPUB'
+ isbn CHAR(15) XPATH='@ISBN',
+ language CHAR(2) XPATH='@LANG',
+ subject CHAR(32) XPATH='@SUBJECT',
+ authorfn CHAR(20) XPATH='AUTHOR/FIRSTNAME',
+ authorln CHAR(20) XPATH='AUTHOR/LASTNAME',
+ title CHAR(32) XPATH='TITLE',
+ translated CHAR(32) XPATH='TRANSLATOR/@PREFIX',
+ tranfn CHAR(20) XPATH='TRANSLATOR/FIRSTNAME',
+ tranln CHAR(20) XPATH='TRANSLATOR/LASTNAME',
+ publisher CHAR(20) XPATH='PUBLISHER/NAME',
+ location CHAR(20) XPATH='PUBLISHER/PLACE',
+ year INT(4) XPATH='DATEPUB'
) ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample.xml'
TABNAME='BIBLIO' OPTION_LIST='rownode=BOOK,skipnull=1,xmlsup=libxml2';
SELECT * FROM t1;
@@ -150,8 +150,8 @@ DROP TABLE t1;
#--echo # Relative paths are not supported
#--echo #
#CREATE TABLE t1 (
-# authorfn CHAR(20) FIELD_FORMAT='//FIRSTNAME',
-# authorln CHAR(20) FIELD_FORMAT='//LASTNAME'
+# authorfn CHAR(20) XPATH='//FIRSTNAME',
+# authorln CHAR(20) XPATH='//LASTNAME'
#) ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample.xml'
# TABNAME='BIBLIO' OPTION_LIST='rownode=BOOK,skipnull=1';
#SELECT * FROM t1;
@@ -165,8 +165,8 @@ DROP TABLE t1;
#--echo # Absolute path is not supported
#--echo #
#CREATE TABLE t1 (
-# authorfn CHAR(20) FIELD_FORMAT='/BIBLIO/BOOK/AUTHOR/FIRSTNAME',
-# authorln CHAR(20) FIELD_FORMAT='/BIBLIO/BOOK/AUTHOR/LASTNAME'
+# authorfn CHAR(20) XPATH='/BIBLIO/BOOK/AUTHOR/FIRSTNAME',
+# authorln CHAR(20) XPATH='/BIBLIO/BOOK/AUTHOR/LASTNAME'
#) ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample.xml'
# TABNAME='BIBLIO' OPTION_LIST='rownode=BOOK,skipnull=1';
#SELECT * FROM t1;
@@ -178,7 +178,7 @@ DROP TABLE t1;
--echo #
CREATE TABLE t1
(
- isbn CHAR(15) FIELD_FORMAT='@isbn'
+ isbn CHAR(15) XPATH='@isbn'
) ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample.xml'
TABNAME='BIBLIO' OPTION_LIST='rownode=BOOK,skipnull=1,xmlsup=libxml2';
SELECT * FROM t1;
diff --git a/storage/connect/mysql-test/connect/t/xml2_html.test b/storage/connect/mysql-test/connect/t/xml2_html.test
index 1c84b46ec38..2f4fc50e5e6 100644
--- a/storage/connect/mysql-test/connect/t/xml2_html.test
+++ b/storage/connect/mysql-test/connect/t/xml2_html.test
@@ -11,9 +11,9 @@ SET NAMES utf8;
--echo # Testing HTML like XML file
--echo #
CREATE TABLE beers (
-`Name` CHAR(16) FIELD_FORMAT='brandName',
-`Origin` CHAR(16) FIELD_FORMAT='origin',
-`Description` CHAR(32) FIELD_FORMAT='details')
+`Name` CHAR(16) XPATH='brandName',
+`Origin` CHAR(16) XPATH='origin',
+`Description` CHAR(32) XPATH='details')
ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='beers.xml'
TABNAME='table' OPTION_LIST='xmlsup=libxml2,rownode=tr,colnode=td';
SELECT * FROM beers;
diff --git a/storage/connect/mysql-test/connect/t/xml2_mult.test b/storage/connect/mysql-test/connect/t/xml2_mult.test
index cd83827fe34..e9914c71aad 100644
--- a/storage/connect/mysql-test/connect/t/xml2_mult.test
+++ b/storage/connect/mysql-test/connect/t/xml2_mult.test
@@ -15,9 +15,9 @@ SET NAMES utf8;
--echo # Testing expanded values
--echo #
CREATE TABLE `bookstore` (
- `category` CHAR(16) NOT NULL FIELD_FORMAT='@',
+ `category` CHAR(16) NOT NULL XPATH='@',
`title` VARCHAR(50) NOT NULL,
- `lang` char(2) NOT NULL FIELD_FORMAT='title/@',
+ `lang` char(2) NOT NULL XPATH='title/@',
`author` VARCHAR(24) NOT NULL,
`year` INT(4) NOT NULL,
`price` DOUBLE(8,2) NOT NULL)
diff --git a/storage/connect/mysql-test/connect/t/xml2_zip.test b/storage/connect/mysql-test/connect/t/xml2_zip.test
index d8c7894f861..df69f9dace3 100644
--- a/storage/connect/mysql-test/connect/t/xml2_zip.test
+++ b/storage/connect/mysql-test/connect/t/xml2_zip.test
@@ -11,26 +11,26 @@ let $MYSQLD_DATADIR= `select @@datadir`;
--echo # Testing zipped XML tables
--echo #
CREATE TABLE t1 (
-ISBN CHAR(13) NOT NULL FIELD_FORMAT='@',
-LANG CHAR(2) NOT NULL FIELD_FORMAT='@',
-SUBJECT CHAR(12) NOT NULL FIELD_FORMAT='@',
-AUTHOR_FIRSTNAME CHAR(15) NOT NULL FIELD_FORMAT='AUTHOR/FIRSTNAME',
-AUTHOR_LASTNAME CHAR(8) NOT NULL FIELD_FORMAT='AUTHOR/LASTNAME',
-TRANSLATOR_PREFIX CHAR(24) DEFAULT NULL FIELD_FORMAT='TRANSLATOR/@PREFIX',
-TRANSLATOR_FIRSTNAME CHAR(6) DEFAULT NULL FIELD_FORMAT='TRANSLATOR/FIRSTNAME',
-TRANSLATOR_LASTNAME CHAR(6) DEFAULT NULL FIELD_FORMAT='TRANSLATOR/LASTNAME',
+ISBN CHAR(13) NOT NULL XPATH='@',
+LANG CHAR(2) NOT NULL XPATH='@',
+SUBJECT CHAR(12) NOT NULL XPATH='@',
+AUTHOR_FIRSTNAME CHAR(15) NOT NULL XPATH='AUTHOR/FIRSTNAME',
+AUTHOR_LASTNAME CHAR(8) NOT NULL XPATH='AUTHOR/LASTNAME',
+TRANSLATOR_PREFIX CHAR(24) DEFAULT NULL XPATH='TRANSLATOR/@PREFIX',
+TRANSLATOR_FIRSTNAME CHAR(6) DEFAULT NULL XPATH='TRANSLATOR/FIRSTNAME',
+TRANSLATOR_LASTNAME CHAR(6) DEFAULT NULL XPATH='TRANSLATOR/LASTNAME',
TITLE CHAR(30) NOT NULL,
-PUBLISHER_NAME CHAR(15) NOT NULL FIELD_FORMAT='PUBLISHER/NAME',
-PUBLISHER_PLACE CHAR(5) NOT NULL FIELD_FORMAT='PUBLISHER/PLACE',
+PUBLISHER_NAME CHAR(15) NOT NULL XPATH='PUBLISHER/NAME',
+PUBLISHER_PLACE CHAR(5) NOT NULL XPATH='PUBLISHER/PLACE',
DATEPUB CHAR(4) NOT NULL
) ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample2.zip' ZIPPED=YES
-OPTION_LIST='entry=xsample2.xml,load=xsample2.xml,rownode=BOOK,xmlsup=libxml2,expand=1,mulnode=AUTHOR';
+OPTION_LIST='depth=0,entry=xsample2.xml,load=xsample2.xml,rownode=BOOK,xmlsup=libxml2,expand=1,mulnode=AUTHOR';
SELECT * FROM t1;
#testing discovery
CREATE TABLE t2
ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample2.zip' ZIPPED=YES
-OPTION_LIST='xmlsup=libxml2';
+OPTION_LIST='depth=0,xmlsup=libxml2';
SELECT * FROM t2;
DROP TABLE t1,t2;
diff --git a/storage/connect/mysql-test/connect/t/xml_html.test b/storage/connect/mysql-test/connect/t/xml_html.test
index 34d29953f68..1430f68d2b2 100644
--- a/storage/connect/mysql-test/connect/t/xml_html.test
+++ b/storage/connect/mysql-test/connect/t/xml_html.test
@@ -11,9 +11,9 @@ SET NAMES utf8;
--echo # Testing HTML like XML file
--echo #
CREATE TABLE beers (
-`Name` CHAR(16) FIELD_FORMAT='brandName',
-`Origin` CHAR(16) FIELD_FORMAT='origin',
-`Description` CHAR(32) FIELD_FORMAT='details')
+`Name` CHAR(16) XPATH='brandName',
+`Origin` CHAR(16) XPATH='origin',
+`Description` CHAR(32) XPATH='details')
ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='beers.xml'
TABNAME='table' OPTION_LIST='xmlsup=domdoc,rownode=tr,colnode=td';
SELECT * FROM beers;
diff --git a/storage/connect/mysql-test/connect/t/xml_mult.test b/storage/connect/mysql-test/connect/t/xml_mult.test
index cf703e90da4..221d6734546 100644
--- a/storage/connect/mysql-test/connect/t/xml_mult.test
+++ b/storage/connect/mysql-test/connect/t/xml_mult.test
@@ -15,9 +15,9 @@ SET NAMES utf8;
--echo # Testing expanded values
--echo #
CREATE TABLE `bookstore` (
- `category` CHAR(16) NOT NULL FIELD_FORMAT='@',
+ `category` CHAR(16) NOT NULL XPATH='@',
`title` VARCHAR(50) NOT NULL,
- `lang` char(2) NOT NULL FIELD_FORMAT='title/@',
+ `lang` char(2) NOT NULL XPATH='title/@',
`author` VARCHAR(24) NOT NULL,
`year` INT(4) NOT NULL,
`price` DOUBLE(8,2) NOT NULL)
diff --git a/storage/connect/mysql-test/connect/t/xml_zip.test b/storage/connect/mysql-test/connect/t/xml_zip.test
index ad31ca46d4c..29ee2e0e607 100644
--- a/storage/connect/mysql-test/connect/t/xml_zip.test
+++ b/storage/connect/mysql-test/connect/t/xml_zip.test
@@ -11,26 +11,26 @@ let $MYSQLD_DATADIR= `select @@datadir`;
--echo # Testing zipped XML tables
--echo #
CREATE TABLE t1 (
-ISBN CHAR(13) NOT NULL FIELD_FORMAT='@',
-LANG CHAR(2) NOT NULL FIELD_FORMAT='@',
-SUBJECT CHAR(12) NOT NULL FIELD_FORMAT='@',
-AUTHOR_FIRSTNAME CHAR(15) NOT NULL FIELD_FORMAT='AUTHOR/FIRSTNAME',
-AUTHOR_LASTNAME CHAR(8) NOT NULL FIELD_FORMAT='AUTHOR/LASTNAME',
-TRANSLATOR_PREFIX CHAR(24) DEFAULT NULL FIELD_FORMAT='TRANSLATOR/@PREFIX',
-TRANSLATOR_FIRSTNAME CHAR(6) DEFAULT NULL FIELD_FORMAT='TRANSLATOR/FIRSTNAME',
-TRANSLATOR_LASTNAME CHAR(6) DEFAULT NULL FIELD_FORMAT='TRANSLATOR/LASTNAME',
+ISBN CHAR(13) NOT NULL XPATH='@',
+LANG CHAR(2) NOT NULL XPATH='@',
+SUBJECT CHAR(12) NOT NULL XPATH='@',
+AUTHOR_FIRSTNAME CHAR(15) NOT NULL XPATH='AUTHOR/FIRSTNAME',
+AUTHOR_LASTNAME CHAR(8) NOT NULL XPATH='AUTHOR/LASTNAME',
+TRANSLATOR_PREFIX CHAR(24) DEFAULT NULL XPATH='TRANSLATOR/@PREFIX',
+TRANSLATOR_FIRSTNAME CHAR(6) DEFAULT NULL XPATH='TRANSLATOR/FIRSTNAME',
+TRANSLATOR_LASTNAME CHAR(6) DEFAULT NULL XPATH='TRANSLATOR/LASTNAME',
TITLE CHAR(30) NOT NULL,
-PUBLISHER_NAME CHAR(15) NOT NULL FIELD_FORMAT='PUBLISHER/NAME',
-PUBLISHER_PLACE CHAR(5) NOT NULL FIELD_FORMAT='PUBLISHER/PLACE',
+PUBLISHER_NAME CHAR(15) NOT NULL XPATH='PUBLISHER/NAME',
+PUBLISHER_PLACE CHAR(5) NOT NULL XPATH='PUBLISHER/PLACE',
DATEPUB CHAR(4) NOT NULL
) ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample2.zip' ZIPPED=YES
-OPTION_LIST='entry=xsample2.xml,load=xsample2.xml,rownode=BOOK,xmlsup=domdoc,expand=1,mulnode=AUTHOR';
+OPTION_LIST='depth=0,entry=xsample2.xml,load=xsample2.xml,rownode=BOOK,xmlsup=domdoc,expand=1,mulnode=AUTHOR';
SELECT * FROM t1;
#testing discovery
CREATE TABLE t2
ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample2.zip' ZIPPED=YES
-OPTION_LIST='xmlsup=domdoc';
+OPTION_LIST='depth=0,xmlsup=domdoc';
SELECT * FROM t2;
DROP TABLE t1,t2;
diff --git a/storage/connect/mysql-test/connect/t/zip.test b/storage/connect/mysql-test/connect/t/zip.test
index dce68c17eee..1f0a4eedee9 100644
--- a/storage/connect/mysql-test/connect/t/zip.test
+++ b/storage/connect/mysql-test/connect/t/zip.test
@@ -83,37 +83,37 @@ DROP TABLE t1,t2,t3,t4;
--echo #
CREATE TABLE t1 (
_id INT(2) NOT NULL,
-name_first CHAR(9) NOT NULL FIELD_FORMAT='$.name.first',
-name_aka CHAR(4) DEFAULT NULL FIELD_FORMAT='$.name.aka',
-name_last CHAR(10) NOT NULL FIELD_FORMAT='$.name.last',
+name_first CHAR(9) NOT NULL JPATH='$.name.first',
+name_aka CHAR(4) DEFAULT NULL JPATH='$.name.aka',
+name_last CHAR(10) NOT NULL JPATH='$.name.last',
title CHAR(12) DEFAULT NULL,
birth CHAR(20) DEFAULT NULL,
death CHAR(20) DEFAULT NULL,
-contribs CHAR(7) NOT NULL FIELD_FORMAT='$.contribs',
-awards_award CHAR(42) DEFAULT NULL FIELD_FORMAT='$.awards.award',
-awards_year CHAR(4) DEFAULT NULL FIELD_FORMAT='$.awards.year',
-awards_by CHAR(38) DEFAULT NULL FIELD_FORMAT='$.awards.by'
+contribs CHAR(50) NOT NULL JPATH='$.contribs',
+awards_award CHAR(42) DEFAULT NULL JPATH='$.awards.award',
+awards_year CHAR(4) DEFAULT NULL JPATH='$.awards.year',
+awards_by CHAR(38) DEFAULT NULL JPATH='$.awards.by'
) ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='bios.zip' OPTION_LIST='ENTRY=bios.json,LOAD=bios.json' ZIPPED=YES;
SELECT * FROM t1;
# Test discovery
CREATE TABLE t2
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='bios.zip' ZIPPED=1
-OPTION_LIST='LEVEL=5';
+OPTION_LIST='DEPTH=5';
SELECT * FROM t2;
CREATE TABLE t3 (
_id INT(2) NOT NULL,
-firstname CHAR(9) NOT NULL FIELD_FORMAT='$.name.first',
-aka CHAR(4) DEFAULT NULL FIELD_FORMAT='$.name.aka',
-lastname CHAR(10) NOT NULL FIELD_FORMAT='$.name.last',
+firstname CHAR(9) NOT NULL JPATH='$.name.first',
+aka CHAR(4) DEFAULT NULL JPATH='$.name.aka',
+lastname CHAR(10) NOT NULL JPATH='$.name.last',
title CHAR(12) DEFAULT NULL,
birth date DEFAULT NULL date_format="YYYY-DD-MM'T'hh:mm:ss'Z'",
death date DEFAULT NULL date_format="YYYY-DD-MM'T'hh:mm:ss'Z'",
-contribs CHAR(64) NOT NULL FIELD_FORMAT='$.contribs.[", "]',
-award CHAR(42) DEFAULT NULL FIELD_FORMAT='$.awards[*].award',
-year CHAR(4) DEFAULT NULL FIELD_FORMAT='$.awards[*].year',
-`by` CHAR(38) DEFAULT NULL FIELD_FORMAT='$.awards[*].by'
+contribs CHAR(64) NOT NULL JPATH='$.contribs.[", "]',
+award CHAR(42) DEFAULT NULL JPATH='$.awards[*].award',
+year CHAR(4) DEFAULT NULL JPATH='$.awards[*].year',
+`by` CHAR(38) DEFAULT NULL JPATH='$.awards[*].by'
) ENGINE=CONNECT TABLE_TYPE='json' FILE_NAME='bios.zip' ZIPPED=YES;
SELECT * FROM t3 WHERE _id = 1;
diff --git a/storage/connect/myutil.cpp b/storage/connect/myutil.cpp
index 89b18f86323..e53ee1310e4 100644
--- a/storage/connect/myutil.cpp
+++ b/storage/connect/myutil.cpp
@@ -169,10 +169,9 @@ const char *PLGtoMYSQLtype(int type, bool dbf, char v)
case TYPE_BIGINT: return "BIGINT";
case TYPE_TINY: return "TINYINT";
case TYPE_DECIM: return "DECIMAL";
- default: return "CHAR(0)";
+ default: return (v) ? "VARCHAR" : "CHAR";
} // endswitch mytype
- return "CHAR(0)";
} // end of PLGtoMYSQLtype
/************************************************************************/
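The myutil.cpp hunk above replaces the hard-coded "CHAR(0)" fallback of PLGtoMYSQLtype() with a type name chosen by the v flag and drops the now unreachable trailing return. A minimal standalone sketch of that fallback rule, using hypothetical names (PlgType, plg_type_name) rather than the real CONNECT declarations:

#include <cstdio>
#include <string>

// Sketch only: map an internal column type code to a MySQL type name.
// Unknown codes no longer collapse to "CHAR(0)"; they fall back to
// VARCHAR or CHAR depending on whether the column is variable length.
enum PlgType { T_INT, T_BIGINT, T_DOUBLE, T_DECIM, T_UNKNOWN };

std::string plg_type_name(PlgType type, bool variable) {
  switch (type) {
    case T_INT:    return "INT";
    case T_BIGINT: return "BIGINT";
    case T_DOUBLE: return "DOUBLE";
    case T_DECIM:  return "DECIMAL";
    default:       return variable ? "VARCHAR" : "CHAR";
  } // every branch returns, so no trailing return statement is needed
}

int main() {
  std::printf("%s\n", plg_type_name(T_UNKNOWN, true).c_str());  // prints VARCHAR
  return 0;
}

Since every branch of the switch returns, the statement after it could never run, which is presumably why it was removed along with the "CHAR(0)" fallback.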
diff --git a/storage/connect/plgdbsem.h b/storage/connect/plgdbsem.h
index a40e32bcfb2..dd204d065ed 100644
--- a/storage/connect/plgdbsem.h
+++ b/storage/connect/plgdbsem.h
@@ -83,7 +83,8 @@ enum TABTYPE {TAB_UNDEF = 0, /* Table of undefined type */
TAB_ZIP = 27, /* ZIP file info table */
TAB_MONGO = 28, /* Table retrieved from MongoDB */
TAB_REST = 29, /* Table retrieved from Rest */
- TAB_NIY = 30}; /* Table not implemented yet */
+ TAB_BSON = 30, /* BSON Table (development) */
+ TAB_NIY = 31}; /* Table not implemented yet */
enum AMT {TYPE_AM_ERROR = 0, /* Type not defined */
TYPE_AM_ROWID = 1, /* ROWID type (special column) */
@@ -160,7 +161,7 @@ enum RECFM {RECFM_DFLT = 0, /* Default table type */
RECFM_FMT = 8, /* FMT formatted file */
RECFM_VCT = 9, /* VCT formatted files */
RECFM_XML = 10, /* XML formatted files */
- RECFM_JASON = 11, /* JASON formatted files */
+ RECFM_JSON = 11, /* JSON formatted files */
RECFM_DIR = 12, /* DIR table */
RECFM_ODBC = 13, /* Table accessed via ODBC */
RECFM_JDBC = 14, /* Table accessed via JDBC */
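In plgdbsem.h the new TAB_BSON member is inserted just before the TAB_NIY sentinel (which moves from 30 to 31), and the misspelled RECFM_JASON becomes RECFM_JSON. A toy illustration of that sentinel convention, using a simplified stand-in enum rather than the real plgdbsem.h values:

#include <cstdio>

// Illustration only: concrete table types come first and the
// "not implemented yet" sentinel stays last, so it keeps marking the
// end of the valid range when a new type (here TAB_BSON) is added.
enum TabType { TAB_XML, TAB_JSON, TAB_MONGO, TAB_REST, TAB_BSON, TAB_NIY };

static const char *TabName(TabType t) {
  switch (t) {
    case TAB_XML:   return "XML";
    case TAB_JSON:  return "JSON";
    case TAB_MONGO: return "MONGO";
    case TAB_REST:  return "REST";
    case TAB_BSON:  return "BSON";  // new type, slotted in before the sentinel
    default:        return "NIY";   // TAB_NIY and anything out of range
  }
}

int main() {
  std::printf("%s\n", TabName(TAB_BSON));
  return 0;
}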
diff --git a/storage/connect/plugutil.cpp b/storage/connect/plugutil.cpp
index e45feb31bea..f2887987c3f 100644
--- a/storage/connect/plugutil.cpp
+++ b/storage/connect/plugutil.cpp
@@ -96,7 +96,7 @@ char *msglang(void);
typedef struct {
ushort Segsize;
ushort Size;
- } AREASIZE;
+} AREASIZE;
ACTIVITY defActivity = { /* Describes activity and language */
NULL, /* Points to user work area(s) */
@@ -184,7 +184,7 @@ PGLOBAL PlugInit(LPCSTR Language, size_t worksize)
/***********************************************************************/
/* PlugExit: Terminate Plug operations. */
/***********************************************************************/
-int PlugExit(PGLOBAL g)
+PGLOBAL PlugExit(PGLOBAL g)
{
if (g) {
PDBUSER dup = PlgGetUser(g);
@@ -196,7 +196,7 @@ int PlugExit(PGLOBAL g)
delete g;
} // endif g
- return 0;
+ return NULL;
} // end of PlugExit
/***********************************************************************/
@@ -204,7 +204,7 @@ int PlugExit(PGLOBAL g)
/* Note: this routine is not really implemented for Unix. */
/***********************************************************************/
LPSTR PlugRemoveType(LPSTR pBuff, LPCSTR FileName)
- {
+{
#if defined(__WIN__)
char drive[_MAX_DRIVE];
#else
@@ -228,8 +228,7 @@ LPSTR PlugRemoveType(LPSTR pBuff, LPCSTR FileName)
htrc("buff='%-.256s'\n", pBuff);
return pBuff;
- } // end of PlugRemoveType
-
+} // end of PlugRemoveType
BOOL PlugIsAbsolutePath(LPCSTR path)
{
@@ -246,7 +245,7 @@ BOOL PlugIsAbsolutePath(LPCSTR path)
/* Note: this routine is not really implemented for Unix. */
/***********************************************************************/
LPCSTR PlugSetPath(LPSTR pBuff, LPCSTR prefix, LPCSTR FileName, LPCSTR defpath)
- {
+{
char newname[_MAX_PATH];
char direc[_MAX_DIR], defdir[_MAX_DIR], tmpdir[_MAX_DIR];
char fname[_MAX_FNAME];
@@ -347,14 +346,14 @@ LPCSTR PlugSetPath(LPSTR pBuff, LPCSTR prefix, LPCSTR FileName, LPCSTR defpath)
} else
return FileName; // Error, return unchanged name
- } // end of PlugSetPath
+} // end of PlugSetPath
#if defined(XMSG)
/***********************************************************************/
/* PlugGetMessage: get a message from the message file. */
/***********************************************************************/
char *PlugReadMessage(PGLOBAL g, int mid, char *m)
- {
+{
char msgfile[_MAX_PATH], msgid[32], buff[256];
char *msg;
FILE *mfile = NULL;
@@ -378,9 +377,9 @@ char *PlugReadMessage(PGLOBAL g, int mid, char *m)
if (atoi(buff) == mid)
break;
- if (sscanf(buff, " %*d %-.256s \"%[^\"]", msgid, stmsg) < 2) {
+ if (sscanf(buff, " %*d %.31s \"%.255[^\"]", msgid, stmsg) < 2) {
// Old message file
- if (!sscanf(buff, " %*d \"%[^\"]", stmsg)) {
+ if (!sscanf(buff, " %*d \"%.255[^\"]", stmsg)) {
sprintf(stmsg, "Bad message file for %d %-.256s", mid, SVP(m));
goto fin;
} else
@@ -405,14 +404,14 @@ char *PlugReadMessage(PGLOBAL g, int mid, char *m)
msg = stmsg;
return msg;
- } // end of PlugReadMessage
+} // end of PlugReadMessage
#elif defined(NEWMSG)
/***********************************************************************/
/* PlugGetMessage: get a message from the resource string table. */
/***********************************************************************/
char *PlugGetMessage(PGLOBAL g, int mid)
- {
+{
char *msg;
#if 0 // was !defined(UNIX) && !defined(UNIV_LINUX)
@@ -440,7 +439,7 @@ char *PlugGetMessage(PGLOBAL g, int mid)
msg = stmsg;
return msg;
- } // end of PlugGetMessage
+} // end of PlugGetMessage
#endif // NEWMSG
#if defined(__WIN__)
@@ -448,13 +447,13 @@ char *PlugGetMessage(PGLOBAL g, int mid)
/* Return the line length of the console screen buffer. */
/***********************************************************************/
short GetLineLength(PGLOBAL g)
- {
+{
CONSOLE_SCREEN_BUFFER_INFO coninfo;
HANDLE hcons = GetStdHandle(STD_OUTPUT_HANDLE);
BOOL b = GetConsoleScreenBufferInfo(hcons, &coninfo);
return (b) ? coninfo.dwSize.X : 0;
- } // end of GetLineLength
+} // end of GetLineLength
#endif // __WIN__
/***********************************************************************/
@@ -475,17 +474,19 @@ bool AllocSarea(PGLOBAL g, size_t size)
if (!g->Sarea) {
sprintf(g->Message, MSG(MALLOC_ERROR), "malloc");
g->Sarea_Size = 0;
- } else
- g->Sarea_Size = size;
+ } else {
+ g->Sarea_Size = size;
+ PlugSubSet(g->Sarea, size);
+ } // endif Sarea
#if defined(DEVELOPMENT)
if (true) {
#else
if (trace(8)) {
#endif
- if (g->Sarea)
+ if (g->Sarea) {
htrc("Work area of %zd allocated at %p\n", size, g->Sarea);
- else
+ } else
htrc("SareaAlloc: %-.256s\n", g->Message);
} // endif trace
@@ -526,13 +527,13 @@ void FreeSarea(PGLOBAL g)
/* the address and size not larger than memory size. */
/***********************************************************************/
BOOL PlugSubSet(void *memp, size_t size)
- {
+{
PPOOLHEADER pph = (PPOOLHEADER)memp;
pph->To_Free = (size_t)sizeof(POOLHEADER);
pph->FreeBlk = size - pph->To_Free;
return FALSE;
- } /* end of PlugSubSet */
+} /* end of PlugSubSet */
/***********************************************************************/
/* Use it to export a function that do throwing. */
@@ -595,7 +596,7 @@ void *PlugSubAlloc(PGLOBAL g, void *memp, size_t size)
/* Program for sub-allocating and copying a string in a storage area. */
/***********************************************************************/
char *PlugDup(PGLOBAL g, const char *str)
- {
+{
if (str) {
char *sm = (char*)PlugSubAlloc(g, NULL, strlen(str) + 1);
@@ -604,6 +605,33 @@ char *PlugDup(PGLOBAL g, const char *str)
} else
return NULL;
- } // end of PlugDup
+} // end of PlugDup
+
+/*************************************************************************/
+/* This routine makes a memory pointer from an offset. */
+/*************************************************************************/
+void* MakePtr(void* memp, size_t offset)
+{
+ // return ((offset == 0) ? NULL : &((char*)memp)[offset]);
+ return (!offset) ? NULL : (char *)memp + offset;
+} /* end of MakePtr */
+
+/*************************************************************************/
+/* This routine makes an offset (new format) from a memory pointer. */
+/*************************************************************************/
+size_t MakeOff(void* memp, void* ptr)
+{
+ if (ptr) {
+#if defined(_DEBUG) || defined(DEVELOPMENT)
+ if (ptr <= memp) {
+ fprintf(stderr, "ptr %p <= memp %p", ptr, memp);
+ DoThrow(999);
+ } // endif ptr
+#endif // _DEBUG || DEVELOPMENT
+ return (size_t)(((char*)ptr) - ((char*)memp));
+ } else
+ return 0;
+
+} /* end of MakeOff */
-/*--------------------- End of PLUGUTIL program -----------------------*/
+/*---------------------- End of PLUGUTIL program ------------------------*/
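The plugutil.cpp changes add MakePtr() and MakeOff(), which translate between absolute pointers and offsets relative to a work area (offset 0 standing for NULL), and make PlugExit() return NULL so a caller can release and clear its handle in one statement, as tabbson.cpp does below with Bp->G = PlugExit(Bp->G). A self-contained sketch of the offset round-trip under the same conventions, using a plain malloc'ed buffer and hypothetical lower-case names instead of the CONNECT memory pool:

#include <cassert>
#include <cstddef>
#include <cstdlib>

// Offset 0 is reserved to mean "no pointer", mirroring MakePtr/MakeOff above.
static void *make_ptr(void *base, std::size_t offset) {
  return offset ? static_cast<char *>(base) + offset : nullptr;
}

static std::size_t make_off(void *base, void *ptr) {
  if (!ptr)
    return 0;                        // NULL is stored as offset 0
  char *b = static_cast<char *>(base), *p = static_cast<char *>(ptr);
  assert(p > b);                     // a real item always lies after the header
  return static_cast<std::size_t>(p - b);
}

int main() {
  void *area = std::malloc(1024);           // stand-in for a CONNECT work area
  void *item = static_cast<char *>(area) + 64;

  std::size_t off = make_off(area, item);   // pointer -> relocatable offset
  assert(make_ptr(area, off) == item);      // offset  -> pointer round-trips
  assert(make_off(area, nullptr) == 0);     // NULL maps to offset 0
  assert(make_ptr(area, 0) == nullptr);     // and offset 0 maps back to NULL

  std::free(area);
  return 0;
}

Storing offsets rather than raw pointers is presumably what lets the binary BSON trees built by the new code be copied or re-mapped without fixing up internal pointers; the MOF() and MZP() uses later in tabbson.cpp rely on the same convention.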
diff --git a/storage/connect/tabbson.cpp b/storage/connect/tabbson.cpp
new file mode 100644
index 00000000000..db63b8e78db
--- /dev/null
+++ b/storage/connect/tabbson.cpp
@@ -0,0 +1,2562 @@
+/************* tabbson C++ Program Source Code File (.CPP) *************/
+/* PROGRAM NAME: tabbson Version 1.0 */
+/* (C) Copyright to the author Olivier BERTRAND 2020 */
+/* This program implements the BSON class DB execution routines. */
+/***********************************************************************/
+
+/***********************************************************************/
+/* Include relevant sections of the MariaDB header file. */
+/***********************************************************************/
+#include <my_global.h>
+
+/***********************************************************************/
+/* Include application header files: */
+/* global.h is the header containing all global declarations. */
+/* plgdbsem.h is the header containing the DB application declarations. */
+/* tdbdos.h is the header containing the TDBDOS declarations. */
+/* json.h is the header containing the JSON classes declarations. */
+/***********************************************************************/
+#include "global.h"
+#include "plgdbsem.h"
+#include "maputil.h"
+#include "filamtxt.h"
+#include "tabdos.h"
+#include "tabbson.h"
+#include "filamap.h"
+#if defined(GZ_SUPPORT)
+#include "filamgz.h"
+#endif // GZ_SUPPORT
+#if defined(ZIP_SUPPORT)
+#include "filamzip.h"
+#endif // ZIP_SUPPORT
+#if defined(JAVA_SUPPORT)
+#include "jmgfam.h"
+#endif // JAVA_SUPPORT
+#if defined(CMGO_SUPPORT)
+#include "cmgfam.h"
+#endif // CMGO_SUPPORT
+#include "tabmul.h"
+#include "checklvl.h"
+#include "resource.h"
+#include "mycat.h" // for FNC_COL
+
+/***********************************************************************/
+/* This should be an option. */
+/***********************************************************************/
+#define MAXCOL 200 /* Default max column nb in result */
+//#define TYPE_UNKNOWN 12 /* Must be greater than other types */
+
+/***********************************************************************/
+/* External functions. */
+/***********************************************************************/
+USETEMP UseTemp(void);
+bool JsonAllPath(void);
+int GetDefaultDepth(void);
+char *GetJsonNull(void);
+
+/***********************************************************************/
+/* BSONColumns: construct the result blocks containing the description */
+/* of all the columns of a table contained inside a JSON file. */
+/***********************************************************************/
+PQRYRES BSONColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt, bool info)
+{
+ static int buftyp[] = { TYPE_STRING, TYPE_SHORT, TYPE_STRING, TYPE_INT,
+ TYPE_INT, TYPE_SHORT, TYPE_SHORT, TYPE_STRING };
+ static XFLD fldtyp[] = { FLD_NAME, FLD_TYPE, FLD_TYPENAME, FLD_PREC,
+ FLD_LENGTH, FLD_SCALE, FLD_NULL, FLD_FORMAT };
+ static unsigned int length[] = { 0, 6, 8, 10, 10, 6, 6, 0 };
+ int i, n = 0;
+ int ncol = sizeof(buftyp) / sizeof(int);
+ PJCL jcp;
+ BSONDISC* pjdc = NULL;
+ PQRYRES qrp;
+ PCOLRES crp;
+
+ if (info) {
+ length[0] = 128;
+ length[7] = 256;
+ goto skipit;
+ } // endif info
+
+ if (GetIntegerTableOption(g, topt, "Multiple", 0)) {
+ strcpy(g->Message, "Cannot find column definition for multiple table");
+ return NULL;
+ } // endif Multiple
+
+ pjdc = new(g) BSONDISC(g, length);
+
+ if (!(n = pjdc->GetColumns(g, db, dsn, topt)))
+ return NULL;
+
+skipit:
+ if (trace(1))
+ htrc("BSONColumns: n=%d len=%d\n", n, length[0]);
+
+ /*********************************************************************/
+ /* Allocate the structures used to refer to the result set. */
+ /*********************************************************************/
+ qrp = PlgAllocResult(g, ncol, n, IDS_COLUMNS + 3,
+ buftyp, fldtyp, length, false, false);
+
+ crp = qrp->Colresp->Next->Next->Next->Next->Next->Next;
+ crp->Name = PlugDup(g, "Nullable");
+ crp->Next->Name = PlugDup(g, "Jpath");
+
+ if (info || !qrp)
+ return qrp;
+
+ qrp->Nblin = n;
+
+ /*********************************************************************/
+ /* Now get the results into blocks. */
+ /*********************************************************************/
+ for (i = 0, jcp = pjdc->fjcp; jcp; i++, jcp = jcp->Next) {
+ if (jcp->Type == TYPE_UNKNOWN)
+ jcp->Type = TYPE_STRG; // Void column
+
+ crp = qrp->Colresp; // Column Name
+ crp->Kdata->SetValue(jcp->Name, i);
+ crp = crp->Next; // Data Type
+ crp->Kdata->SetValue(jcp->Type, i);
+ crp = crp->Next; // Type Name
+ crp->Kdata->SetValue(GetTypeName(jcp->Type), i);
+ crp = crp->Next; // Precision
+ crp->Kdata->SetValue(jcp->Len, i);
+ crp = crp->Next; // Length
+ crp->Kdata->SetValue(jcp->Len, i);
+ crp = crp->Next; // Scale (precision)
+ crp->Kdata->SetValue(jcp->Scale, i);
+ crp = crp->Next; // Nullable
+ crp->Kdata->SetValue(jcp->Cbn ? 1 : 0, i);
+ crp = crp->Next; // Field format
+
+ if (crp->Kdata)
+ crp->Kdata->SetValue(jcp->Fmt, i);
+
+ } // endfor i
+
+/*********************************************************************/
+/* Return the result pointer. */
+/*********************************************************************/
+ return qrp;
+} // end of BSONColumns
+
+/* -------------------------- Class BSONDISC ------------------------- */
+
+/***********************************************************************/
+/* Class used to get the columns of a JSON table. */
+/***********************************************************************/
+BSONDISC::BSONDISC(PGLOBAL g, uint* lg)
+{
+ length = lg;
+ jcp = fjcp = pjcp = NULL;
+ tdp = NULL;
+ tjnp = NULL;
+ jpp = NULL;
+ tjsp = NULL;
+ jsp = NULL;
+ bp = NULL;
+ row = NULL;
+ sep = NULL;
+ i = n = bf = ncol = lvl = sz = limit = 0;
+ all = strfy = false;
+} // end of BSONDISC constructor
+
+int BSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt)
+{
+ char filename[_MAX_PATH];
+ bool mgo = (GetTypeID(topt->type) == TAB_MONGO);
+ PBVAL bdp = NULL;
+
+ lvl = GetIntegerTableOption(g, topt, "Level", GetDefaultDepth());
+ lvl = GetIntegerTableOption(g, topt, "Depth", lvl);
+ sep = GetStringTableOption(g, topt, "Separator", ".");
+ sz = GetIntegerTableOption(g, topt, "Jsize", 1024);
+ limit = GetIntegerTableOption(g, topt, "Limit", 10);
+ strfy = GetBooleanTableOption(g, topt, "Stringify", false);
+
+ /*********************************************************************/
+ /* Open the input file. */
+ /*********************************************************************/
+ tdp = new(g) BSONDEF;
+ tdp->G = NULL;
+#if defined(ZIP_SUPPORT)
+ tdp->Entry = GetStringTableOption(g, topt, "Entry", NULL);
+ tdp->Zipped = GetBooleanTableOption(g, topt, "Zipped", false);
+#endif // ZIP_SUPPORT
+ tdp->Fn = GetStringTableOption(g, topt, "Filename", NULL);
+
+ if (!(tdp->Database = SetPath(g, db)))
+ return 0;
+
+ tdp->Objname = GetStringTableOption(g, topt, "Object", NULL);
+ tdp->Base = GetIntegerTableOption(g, topt, "Base", 0) ? 1 : 0;
+ tdp->Pretty = GetIntegerTableOption(g, topt, "Pretty", 2);
+ tdp->Xcol = GetStringTableOption(g, topt, "Expand", NULL);
+ tdp->Accept = GetBooleanTableOption(g, topt, "Accept", false);
+ tdp->Uri = (dsn && *dsn ? dsn : NULL);
+
+ if (!tdp->Fn && !tdp->Uri) {
+ strcpy(g->Message, MSG(MISSING_FNAME));
+ return 0;
+ } // endif Fn
+
+ if (tdp->Fn) {
+ // Use the file name relative to the recorded datapath
+ PlugSetPath(filename, tdp->Fn, tdp->GetPath());
+ tdp->Fn = PlugDup(g, filename);
+ } // endif Fn
+
+ if (trace(1))
+ htrc("File %s objname=%s pretty=%d lvl=%d\n",
+ tdp->Fn, tdp->Objname, tdp->Pretty, lvl);
+
+ if (tdp->Uri) {
+#if defined(JAVA_SUPPORT) || defined(CMGO_SUPPORT)
+ tdp->Collname = GetStringTableOption(g, topt, "Name", NULL);
+ tdp->Collname = GetStringTableOption(g, topt, "Tabname", tdp->Collname);
+ tdp->Schema = GetStringTableOption(g, topt, "Dbname", "test");
+ tdp->Options = (PSZ)GetStringTableOption(g, topt, "Colist", "all");
+ tdp->Pipe = GetBooleanTableOption(g, topt, "Pipeline", false);
+ tdp->Driver = (PSZ)GetStringTableOption(g, topt, "Driver", NULL);
+ tdp->Version = GetIntegerTableOption(g, topt, "Version", 3);
+ tdp->Wrapname = (PSZ)GetStringTableOption(g, topt, "Wrapper",
+ (tdp->Version == 2) ? "Mongo2Interface" : "Mongo3Interface");
+ tdp->Pretty = 0;
+#else // !MONGO_SUPPORT
+ sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "MONGO");
+ return 0;
+#endif // !MONGO_SUPPORT
+ } // endif Uri
+
+ if (tdp->Pretty == 2) {
+ tdp->G = g;
+
+ if (tdp->Zipped) {
+#if defined(ZIP_SUPPORT)
+ tjsp = new(g) TDBBSON(g, tdp, new(g) UNZFAM(tdp));
+#else // !ZIP_SUPPORT
+ sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "ZIP");
+ return 0;
+#endif // !ZIP_SUPPORT
+ } else
+ tjsp = new(g) TDBBSON(g, tdp, new(g) MAPFAM(tdp));
+
+ if (tjsp->MakeDocument(g))
+ return 0;
+
+ bp = tjsp->Bp;
+// bdp = tjsp->GetDoc() ? bp->GetBson(tjsp->GetDoc()) : NULL;
+ bdp = tjsp->GetDoc();
+ jsp = bdp ? bp->GetArrayValue(bdp, 0) : NULL;
+ } else {
+ if (!((tdp->Lrecl = GetIntegerTableOption(g, topt, "Lrecl", 0)))) {
+ if (!mgo) {
+ sprintf(g->Message, "LRECL must be specified for pretty=%d", tdp->Pretty);
+ return 0;
+ } else
+ tdp->Lrecl = 8192; // Should be enough
+
+ } // endif Lrecl
+
+ // Allocate the parse work memory
+ tdp->G = PlugInit(NULL, (size_t)tdp->Lrecl * (tdp->Pretty >= 0 ? 4 : 2));
+ tdp->Ending = GetIntegerTableOption(g, topt, "Ending", CRLF);
+
+ if (tdp->Zipped) {
+#if defined(ZIP_SUPPORT)
+ tjnp = new(g)TDBBSN(g, tdp, new(g) UNZFAM(tdp));
+#else // !ZIP_SUPPORT
+ sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "ZIP");
+ return 0;
+#endif // !ZIP_SUPPORT
+ } else if (tdp->Uri) {
+ if (tdp->Driver && toupper(*tdp->Driver) == 'C') {
+#if defined(CMGO_SUPPORT)
+ tjnp = new(g) TDBBSN(g, tdp, new(g) CMGFAM(tdp));
+#else
+ sprintf(g->Message, "Mongo %s Driver not available", "C");
+ return 0;
+#endif
+ } else if (tdp->Driver && toupper(*tdp->Driver) == 'J') {
+#if defined(JAVA_SUPPORT)
+ tjnp = new(g) TDBBSN(g, tdp, new(g) JMGFAM(tdp));
+#else
+ sprintf(g->Message, "Mongo %s Driver not available", "Java");
+ return 0;
+#endif
+ } else { // Driver not specified
+#if defined(CMGO_SUPPORT)
+ tjnp = new(g) TDBBSN(g, tdp, new(g) CMGFAM(tdp));
+#elif defined(JAVA_SUPPORT)
+ tjnp = new(g) TDBBSN(g, tdp, new(g) JMGFAM(tdp));
+#else
+ sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "MONGO");
+ return 0;
+#endif
+ } // endif Driver
+
+ } else if (tdp->Pretty >= 0)
+ tjnp = new(g) TDBBSN(g, tdp, new(g) DOSFAM(tdp));
+ else
+ tjnp = new(g) TDBBSN(g, tdp, new(g) BINFAM(tdp));
+
+ tjnp->SetMode(MODE_READ);
+ bp = tjnp->Bp;
+
+ if (tjnp->OpenDB(g))
+ return 0;
+
+ switch (tjnp->ReadDB(g)) {
+ case RC_EF:
+ strcpy(g->Message, "Void json table");
+ case RC_FX:
+ goto err;
+ default:
+ jsp = tjnp->Row;
+ } // endswitch ReadDB
+
+ } // endif pretty
+
+ if (!(row = (jsp) ? bp->GetObject(jsp) : NULL)) {
+ strcpy(g->Message, "Can only retrieve columns from object rows");
+ goto err;
+ } // endif row
+
+ all = GetBooleanTableOption(g, topt, "Fullarray", false);
+ jcol.Name = jcol.Fmt = NULL;
+ jcol.Next = NULL;
+ jcol.Found = true;
+ colname[0] = 0;
+
+ if (!tdp->Uri) {
+ fmt[0] = '$';
+ fmt[1] = '.';
+ bf = 2;
+ } // endif Uri
+
+ /*********************************************************************/
+ /* Analyse the JSON tree and define columns. */
+ /*********************************************************************/
+ for (i = 1; ; i++) {
+ for (jpp = row; jpp; jpp = bp->GetNext(jpp)) {
+ strncpy(colname, bp->GetKey(jpp), 64);
+ fmt[bf] = 0;
+
+ if (Find(g, bp->GetVlp(jpp), colname, MY_MIN(lvl, 0)))
+ goto err;
+
+ } // endfor jpp
+
+ // Missing column can be null
+ for (jcp = fjcp; jcp; jcp = jcp->Next) {
+ jcp->Cbn |= !jcp->Found;
+ jcp->Found = false;
+ } // endfor jcp
+
+ if (tdp->Pretty != 2) {
+ // Read next record
+ switch (tjnp->ReadDB(g)) {
+ case RC_EF:
+ jsp = NULL;
+ break;
+ case RC_FX:
+ goto err;
+ default:
+ jsp = tjnp->Row;
+ } // endswitch ReadDB
+
+ } else
+ jsp = bp->GetArrayValue(bdp, i);
+
+ if (!(row = (jsp) ? bp->GetObject(jsp) : NULL))
+ break;
+
+ } // endfor i
+
+ if (tdp->Pretty != 2)
+ tjnp->CloseDB(g);
+
+ return n;
+
+err:
+ if (tdp->Pretty != 2)
+ tjnp->CloseDB(g);
+
+ return 0;
+} // end of GetColumns
+
+bool BSONDISC::Find(PGLOBAL g, PBVAL jvp, PCSZ key, int j)
+{
+ char *p, *pc = colname + strlen(colname), buf[32];
+ int ars;
+ size_t n;
+ PBVAL job;
+ PBVAL jar;
+
+ if (jvp && !bp->IsJson(jvp)) {
+ if (JsonAllPath() && !fmt[bf])
+ strcat(fmt, colname);
+
+ jcol.Type = (JTYP)jvp->Type;
+
+ switch (jvp->Type) {
+ case TYPE_STRG:
+ case TYPE_DTM:
+ jcol.Len = (int)strlen(bp->GetString(jvp));
+ break;
+ case TYPE_INTG:
+ case TYPE_BINT:
+ jcol.Len = (int)strlen(bp->GetString(jvp, buf));
+ break;
+ case TYPE_DBL:
+ case TYPE_FLOAT:
+ jcol.Len = (int)strlen(bp->GetString(jvp, buf));
+ jcol.Scale = jvp->Nd;
+ break;
+ case TYPE_BOOL:
+ jcol.Len = 1;
+ break;
+ default:
+ jcol.Len = 0;
+ break;
+ } // endswitch Type
+
+ jcol.Scale = jvp->Nd;
+ jcol.Cbn = jvp->Type == TYPE_NULL;
+ } else if (!jvp || bp->IsValueNull(jvp)) {
+ jcol.Type = TYPE_UNKNOWN;
+ jcol.Len = jcol.Scale = 0;
+ jcol.Cbn = true;
+ } else if (j < lvl) {
+ if (!fmt[bf])
+ strcat(fmt, colname);
+
+ p = fmt + strlen(fmt);
+ jsp = jvp;
+
+ switch (jsp->Type) {
+ case TYPE_JOB:
+ job = jsp;
+
+ for (PBPR jrp = bp->GetObject(job); jrp; jrp = bp->GetNext(jrp)) {
+ PCSZ k = bp->GetKey(jrp);
+
+ if (*k != '$') {
+ n = sizeof(fmt) - strlen(fmt) - 1;
+ strncat(strncat(fmt, sep, n), k, n - strlen(sep));
+ n = sizeof(colname) - strlen(colname) - 1;
+ strncat(strncat(colname, "_", n), k, n - 1);
+ } // endif Key
+
+ if (Find(g, bp->GetVlp(jrp), k, j + 1))
+ return true;
+
+ *p = *pc = 0;
+ } // endfor jrp
+
+ return false;
+ case TYPE_JAR:
+ jar = jsp;
+
+ if (all || (tdp->Xcol && !stricmp(tdp->Xcol, key)))
+ ars = MY_MIN(bp->GetArraySize(jar), limit);
+ else
+ ars = MY_MIN(bp->GetArraySize(jar), 1);
+
+ for (int k = 0; k < ars; k++) {
+ n = sizeof(fmt) - (strlen(fmt) + 1);
+
+ if (!tdp->Xcol || stricmp(tdp->Xcol, key)) {
+ sprintf(buf, "%d", k);
+
+ if (tdp->Uri) {
+ strncat(strncat(fmt, sep, n), buf, n - strlen(sep));
+ } else {
+ strncat(strncat(fmt, "[", n), buf, n - 1);
+ strncat(fmt, "]", n - (strlen(buf) + 1));
+ } // endif uri
+
+ if (all) {
+ n = sizeof(colname) - (strlen(colname) + 1);
+ strncat(strncat(colname, "_", n), buf, n - 1);
+ } // endif all
+
+ } else {
+ strncat(fmt, (tdp->Uri ? sep : "[*]"), n);
+ }
+
+ if (Find(g, bp->GetArrayValue(jar, k), "", j))
+ return true;
+
+ *p = *pc = 0;
+ } // endfor k
+
+ return false;
+ default:
+ sprintf(g->Message, "Logical error after %s", fmt);
+ return true;
+ } // endswitch Type
+
+ } else if (lvl >= 0) {
+ if (strfy) {
+ if (!fmt[bf])
+ strcat(fmt, colname);
+
+ strcat(fmt, ".*");
+ } else if (JsonAllPath() && !fmt[bf])
+ strcat(fmt, colname);
+
+ jcol.Type = TYPE_STRG;
+ jcol.Len = sz;
+ jcol.Scale = 0;
+ jcol.Cbn = true;
+ } else
+ return false;
+
+ AddColumn(g);
+ return false;
+} // end of Find
+
+void BSONDISC::AddColumn(PGLOBAL g)
+{
+ bool b = fmt[bf] != 0; // True if formatted
+
+ // Check whether this column was already found
+ for (jcp = fjcp; jcp; jcp = jcp->Next)
+ if (!strcmp(colname, jcp->Name))
+ break;
+
+ if (jcp) {
+ if (jcp->Type != jcol.Type) {
+ if (jcp->Type == TYPE_UNKNOWN || jcp->Type == TYPE_NULL)
+ jcp->Type = jcol.Type;
+ // else if (jcol.Type != TYPE_UNKNOWN && jcol.Type != TYPE_VOID)
+ // jcp->Type = TYPE_STRING;
+ else if (jcp->Type != TYPE_STRG)
+ switch (jcol.Type) {
+ case TYPE_STRG:
+ case TYPE_DBL:
+ jcp->Type = jcol.Type;
+ break;
+ case TYPE_BINT:
+ if (jcp->Type == TYPE_INTG || jcp->Type == TYPE_BOOL)
+ jcp->Type = jcol.Type;
+
+ break;
+ case TYPE_INTG:
+ if (jcp->Type == TYPE_BOOL)
+ jcp->Type = jcol.Type;
+
+ break;
+ default:
+ break;
+ } // endswitch Type
+
+ } // endif Type
+
+ if (b && (!jcp->Fmt || strlen(jcp->Fmt) < strlen(fmt))) {
+ jcp->Fmt = PlugDup(g, fmt);
+ length[7] = MY_MAX(length[7], strlen(fmt));
+ } // endif fmt
+
+ jcp->Len = MY_MAX(jcp->Len, jcol.Len);
+ jcp->Scale = MY_MAX(jcp->Scale, jcol.Scale);
+ jcp->Cbn |= jcol.Cbn;
+ jcp->Found = true;
+ } else if (jcol.Type != TYPE_UNKNOWN || tdp->Accept) {
+ // New column
+ jcp = (PJCL)PlugSubAlloc(g, NULL, sizeof(JCOL));
+ *jcp = jcol;
+ jcp->Cbn |= (i > 1);
+ jcp->Name = PlugDup(g, colname);
+ length[0] = MY_MAX(length[0], strlen(colname));
+
+ if (b) {
+ jcp->Fmt = PlugDup(g, fmt);
+ length[7] = MY_MAX(length[7], strlen(fmt));
+ } else
+ jcp->Fmt = NULL;
+
+ if (pjcp) {
+ jcp->Next = pjcp->Next;
+ pjcp->Next = jcp;
+ } else
+ fjcp = jcp;
+
+ n++;
+ } // endif jcp
+
+ if (jcp)
+ pjcp = jcp;
+
+} // end of AddColumn
+
+/* -------------------------- Class BTUTIL --------------------------- */
+
+/***********************************************************************/
+/* Find the row in the tree structure. */
+/***********************************************************************/
+PBVAL BTUTIL::FindRow(PGLOBAL g)
+{
+ char *p, *objpath;
+ PBVAL jsp = Tp->Row;
+ PBVAL val = NULL;
+
+ for (objpath = PlugDup(g, Tp->Objname); jsp && objpath; objpath = p) {
+ if ((p = strchr(objpath, Tp->Sep)))
+ *p++ = 0;
+
+ if (*objpath != '[' && !IsNum(objpath)) { // objpass is a key
+ val = (jsp->Type == TYPE_JOB) ?
+ GetKeyValue(jsp, objpath) : NULL;
+ } else {
+ if (*objpath == '[') {
+ if (objpath[strlen(objpath) - 1] == ']')
+ objpath++;
+ else
+ return NULL;
+ } // endif [
+
+ val = (jsp->Type == TYPE_JAR) ?
+ GetArrayValue(GetArray(jsp), atoi(objpath) - Tp->B) : NULL;
+ } // endif objpath
+
+ // jsp = (val) ? val->GetJson() : NULL;
+ jsp = val;
+ } // endfor objpath
+
+ return jsp;
+} // end of FindRow
+
+/***********************************************************************/
+/* Parse the read line. */
+/***********************************************************************/
+PBVAL BTUTIL::ParseLine(PGLOBAL g, int prty, bool cma)
+{
+ pretty = prty;
+ comma = cma;
+ return ParseJson(g, Tp->To_Line, strlen(Tp->To_Line));
+} // end of ParseLine
+
+/***********************************************************************/
+/* Make the top tree from the object path. */
+/***********************************************************************/
+PBVAL BTUTIL::MakeTopTree(PGLOBAL g, int type)
+{
+ PBVAL top = NULL, val = NULL;
+
+ if (Tp->Objname) {
+ if (!Tp->Row) {
+ // Parse and allocate Objpath item(s)
+ char* p;
+ char *objpath = PlugDup(g, Tp->Objname);
+ int i;
+ PBVAL objp = NULL;
+ PBVAL arp = NULL;
+
+ for (; objpath; objpath = p) {
+ if ((p = strchr(objpath, Tp->Sep)))
+ *p++ = 0;
+
+ if (*objpath != '[' && !IsNum(objpath)) {
+ objp = NewVal(TYPE_JOB);
+
+ if (!top)
+ top = objp;
+
+ if (val)
+ SetValueObj(val, objp);
+
+ val = NewVal();
+ SetKeyValue(objp, MOF(val), objpath);
+ } else {
+ if (*objpath == '[') {
+ // Old style
+ if (objpath[strlen(objpath) - 1] != ']') {
+ sprintf(g->Message, "Invalid Table path %s", Tp->Objname);
+ return NULL;
+ } else
+ objpath++;
+
+ } // endif objpath
+
+ if (!top)
+ top = NewVal(TYPE_JAR);
+
+ if (val)
+ SetValueArr(val, arp);
+
+ val = NewVal();
+ i = atoi(objpath) - Tp->B;
+ SetArrayValue(arp, val, i);
+ } // endif objpath
+
+ } // endfor p
+
+ } // endif Val
+
+ Tp->Row = val;
+ if (Tp->Row) Tp->Row->Type = type;
+ } else
+ top = Tp->Row = NewVal(type);
+
+ return top;
+} // end of MakeTopTree
+
+PSZ BTUTIL::SerialVal(PGLOBAL g, PBVAL vlp, int pretty)
+{
+ return Serialize(g, vlp, NULL, pretty);
+} // end of SerialVal
+
+/* -------------------------- Class BCUTIL --------------------------- */
+
+/***********************************************************************/
+/* SetJsonValue: Set a column value from the contents of a BVALUE. */
+/***********************************************************************/
+void BCUTIL::SetJsonValue(PGLOBAL g, PVAL vp, PBVAL jvp)
+{
+ if (jvp) {
+ vp->SetNull(false);
+
+ if (Jb) {
+ vp->SetValue_psz(Serialize(g, jvp, NULL, 0));
+ Jb = false;
+ } else switch (jvp->Type) {
+ case TYPE_STRG:
+ case TYPE_INTG:
+ case TYPE_BINT:
+ case TYPE_DBL:
+ case TYPE_DTM:
+ case TYPE_FLOAT:
+ switch (vp->GetType()) {
+ case TYPE_STRING:
+ case TYPE_DATE:
+ case TYPE_DECIM:
+ vp->SetValue_psz(GetString(jvp));
+ break;
+ case TYPE_INT:
+ case TYPE_SHORT:
+ case TYPE_TINY:
+ vp->SetValue(GetInteger(jvp));
+ break;
+ case TYPE_BIGINT:
+ vp->SetValue(GetBigint(jvp));
+ break;
+ case TYPE_DOUBLE:
+ vp->SetValue(GetDouble(jvp));
+
+ if (jvp->Type == TYPE_DBL || jvp->Type == TYPE_FLOAT)
+ vp->SetPrec(jvp->Nd);
+
+ break;
+ default:
+ sprintf(G->Message, "Unsupported column type %d", vp->GetType());
+ throw 888;
+ } // endswitch Type
+
+ break;
+ case TYPE_BOOL:
+ if (vp->IsTypeNum())
+ vp->SetValue(GetInteger(jvp) ? 1 : 0);
+ else
+ vp->SetValue_psz((PSZ)(GetInteger(jvp) ? "true" : "false"));
+
+ break;
+ case TYPE_JAR:
+ case TYPE_JOB:
+ // SetJsonValue(g, vp, val->GetArray()->GetValue(0));
+ vp->SetValue_psz(GetValueText(g, jvp, NULL));
+ break;
+ default:
+ vp->Reset();
+ vp->SetNull(true);
+ } // endswitch Type
+
+ } else {
+ vp->Reset();
+ vp->SetNull(true);
+ } // endif val
+
+} // end of SetJsonValue
+
+/***********************************************************************/
+/* MakeBson: Build the BSON value to be serialized for this column. */
+/***********************************************************************/
+PBVAL BCUTIL::MakeBson(PGLOBAL g, PBVAL jsp, int n)
+{
+ PBVAL vlp, jvp = jsp;
+
+ if (n < Cp->Nod - 1) {
+ if (jsp->Type == TYPE_JAR) {
+ int ars = GetArraySize(jsp);
+ PJNODE jnp = &Cp->Nodes[n];
+
+ jvp = NewVal(TYPE_JAR);
+ jnp->Op = OP_EQ;
+
+ for (int i = 0; i < ars; i++) {
+ jnp->Rank = i;
+ vlp = GetRowValue(g, jsp, n);
+ AddArrayValue(jvp,DupVal(vlp));
+ } // endfor i
+
+ jnp->Op = OP_XX;
+ jnp->Rank = 0;
+ } else if (jsp->Type == TYPE_JOB) {
+ jvp = NewVal(TYPE_JOB);
+
+ for (PBPR prp = GetObject(jsp); prp; prp = GetNext(prp)) {
+ vlp = GetRowValue(g, GetVlp(prp), n + 1);
+ SetKeyValue(jvp, vlp, MZP(prp->Key));
+ } // endfor prp
+
+ } // endif Type
+
+ } // endif's
+
+ Jb = true;
+ return jvp;
+} // end of MakeBson
+
+/***********************************************************************/
+/* GetRowValue: Get the BSON value addressed by the column path. */
+/***********************************************************************/
+PBVAL BCUTIL::GetRowValue(PGLOBAL g, PBVAL row, int i)
+{
+ int nod = Cp->Nod, n = nod - 1;
+ JNODE *nodes = Cp->Nodes;
+ PBVAL arp;
+ PBVAL bvp = NULL;
+
+ for (; i < nod && row; i++) {
+ if (nodes[i].Op == OP_NUM) {
+ bvp = NewVal(TYPE_INT);
+ bvp->N = (row->Type == TYPE_JAR) ? GetSize(row) : 1;
+ return(bvp);
+ } else if (nodes[i].Op == OP_XX) {
+ return MakeBson(g, row, i);
+ } else switch (row->Type) {
+ case TYPE_JOB:
+ if (!nodes[i].Key) {
+ // Expected Array was not there, wrap the value
+ if (i < nod - 1)
+ continue;
+ else
+ bvp = row;
+
+ } else
+ bvp = GetKeyValue(row, nodes[i].Key);
+
+ break;
+ case TYPE_JAR:
+ arp = row;
+
+ if (!nodes[i].Key) {
+ if (nodes[i].Op == OP_EQ)
+ bvp = GetArrayValue(arp, nodes[i].Rank);
+ else if (nodes[i].Op == OP_EXP)
+ return NewVal(ExpandArray(g, arp, i));
+ else
+ return NewVal(CalculateArray(g, arp, i));
+
+ } else {
+ // Unexpected array, unwrap it as [0]
+ bvp = GetArrayValue(arp, 0);
+ i--;
+ } // endif's
+
+ break;
+ case TYPE_JVAL:
+ bvp = row;
+ break;
+ default:
+ sprintf(g->Message, "Invalid row JSON type %d", row->Type);
+ bvp = NULL;
+ } // endswitch Type
+
+ if (i < nod - 1)
+ row = bvp;
+
+ } // endfor i
+
+ return bvp;
+} // end of GetRowValue
+
+/***********************************************************************/
+/* GetColumnValue: Get the path value and convert it to the column type. */
+/***********************************************************************/
+PVAL BCUTIL::GetColumnValue(PGLOBAL g, PBVAL row, int i)
+{
+ PVAL value = Cp->Value;
+ PBVAL bvp = GetRowValue(g, row, i);
+
+ SetJsonValue(g, value, bvp);
+ return value;
+} // end of GetColumnValue
+
+/***********************************************************************/
+/* ExpandArray: Return the next value of an expanded array column. */
+/***********************************************************************/
+PVAL BCUTIL::ExpandArray(PGLOBAL g, PBVAL arp, int n)
+{
+ int nod = Cp->Nod, ars = MY_MIN(Tp->Limit, GetArraySize(arp));
+ JNODE *nodes = Cp->Nodes;
+ PVAL value = Cp->Value;
+ PBVAL bvp;
+ BVAL bval;
+
+ if (!ars) {
+ value->Reset();
+ value->SetNull(true);
+ Tp->NextSame = 0;
+ return value;
+ } // endif ars
+
+ if (!(bvp = GetArrayValue(arp, (nodes[n].Rx = nodes[n].Nx)))) {
+ strcpy(g->Message, "Logical error expanding array");
+ throw 666;
+ } // endif jvp
+
+ if (n < nod - 1 && IsJson(bvp)) {
+ SetValue(&bval, GetColumnValue(g, bvp, n + 1));
+ bvp = &bval;
+ } // endif n
+
+ if (n >= Tp->NextSame) {
+ if (++nodes[n].Nx == ars) {
+ nodes[n].Nx = 0;
+ Cp->Xnod = 0;
+ } else
+ Cp->Xnod = n;
+
+ Tp->NextSame = Cp->Xnod;
+ } // endif NextSame
+
+ SetJsonValue(g, value, bvp);
+ return value;
+} // end of ExpandArray
+
+/***********************************************************************/
+/* CalculateArray: Apply the node operator to all the array values. */
+/***********************************************************************/
+PVAL BCUTIL::CalculateArray(PGLOBAL g, PBVAL arp, int n)
+{
+ int i, ars, nv = 0, nextsame = Tp->NextSame;
+ bool err;
+ int nod = Cp->Nod;
+ JNODE *nodes = Cp->Nodes;
+ OPVAL op = nodes[n].Op;
+ PVAL val[2], vp = nodes[n].Valp, mulval = Cp->MulVal;
+ PBVAL jvrp, jvp;
+ BVAL jval;
+
+ vp->Reset();
+ ars = MY_MIN(Tp->Limit, GetArraySize(arp));
+ xtrc(1,"CalculateArray: size=%d op=%d nextsame=%d\n", ars, op, nextsame);
+
+ for (i = 0; i < ars; i++) {
+ jvrp = GetArrayValue(arp, i);
+ xtrc(1, "i=%d nv=%d\n", i, nv);
+
+ if (!IsValueNull(jvrp) || (op == OP_CNC && GetJsonNull())) do {
+ if (IsValueNull(jvrp)) {
+ SetString(jvrp, PlugDup(G, GetJsonNull()));
+ jvp = jvrp;
+ } else if (n < nod - 1 && IsJson(jvrp)) {
+ Tp->NextSame = nextsame;
+ SetValue(&jval, GetColumnValue(g, jvrp, n + 1));
+ jvp = &jval;
+ } else
+ jvp = jvrp;
+
+ xtrc(1, "jvp=%s null=%d\n", GetString(jvp), IsValueNull(jvp) ? 1 : 0);
+
+ if (!nv++) {
+ SetJsonValue(g, vp, jvp);
+ continue;
+ } else
+ SetJsonValue(g, mulval, jvp);
+
+ if (!mulval->IsNull()) {
+ switch (op) {
+ case OP_CNC:
+ if (nodes[n].CncVal) {
+ val[0] = nodes[n].CncVal;
+ err = vp->Compute(g, val, 1, op);
+ } // endif CncVal
+
+ val[0] = mulval;
+ err = vp->Compute(g, val, 1, op);
+ break;
+ // case OP_NUM:
+ case OP_SEP:
+ val[0] = nodes[n].Valp;
+ val[1] = mulval;
+ err = vp->Compute(g, val, 2, OP_ADD);
+ break;
+ default:
+ val[0] = nodes[n].Valp;
+ val[1] = mulval;
+ err = vp->Compute(g, val, 2, op);
+ } // endswitch Op
+
+ if (err)
+ vp->Reset();
+
+ if (trace(1)) {
+ char buf[32];
+
+ htrc("vp='%s' err=%d\n",
+ vp->GetCharString(buf), err ? 1 : 0);
+
+ } // endif trace
+
+ } // endif Null
+
+ } while (Tp->NextSame > nextsame);
+
+ } // endfor i
+
+ if (op == OP_SEP) {
+ // Calculate average
+ mulval->SetValue(nv);
+ val[0] = vp;
+ val[1] = mulval;
+
+ if (vp->Compute(g, val, 2, OP_DIV))
+ vp->Reset();
+
+ } // endif Op
+
+ Tp->NextSame = nextsame;
+ return vp;
+} // end of CalculateArray
+
+/***********************************************************************/
+/* GetRow: Get the object containing this column. */
+/***********************************************************************/
+PBVAL BCUTIL::GetRow(PGLOBAL g)
+{
+ int nod = Cp->Nod;
+ JNODE *nodes = Cp->Nodes;
+ PBVAL val = NULL;
+ PBVAL arp;
+ PBVAL nwr, row = Tp->Row;
+
+ for (int i = 0; i < nod && row; i++) {
+ if (i < nod-1 && nodes[i+1].Op == OP_XX)
+ break;
+ else switch (row->Type) {
+ case TYPE_JOB:
+ if (!nodes[i].Key)
+ // Expected Array was not there, wrap the value
+ continue;
+
+ val = GetKeyValue(row, nodes[i].Key);
+ break;
+ case TYPE_JAR:
+ arp = row;
+
+ if (!nodes[i].Key) {
+ if (nodes[i].Op == OP_EQ)
+ val = GetArrayValue(arp, nodes[i].Rank);
+ else
+ val = GetArrayValue(arp, nodes[i].Rx);
+
+ } else {
+ // Unexpected array, unwrap it as [0]
+ val = GetArrayValue(arp, 0);
+ i--;
+ } // endif Nodes
+
+ break;
+ case TYPE_JVAL:
+ val = row;
+ break;
+ default:
+ sprintf(g->Message, "Invalid row JSON type %d", row->Type);
+ val = NULL;
+ } // endswitch Type
+
+ if (val) {
+ row = val;
+ } else {
+ // Construct missing objects
+ for (i++; row && i < nod; i++) {
+ int type;
+
+ if (nodes[i].Op == OP_XX)
+ break;
+ else if (!nodes[i].Key)
+ // Construct intermediate array
+ type = TYPE_JAR;
+ else
+ type = TYPE_JOB;
+
+ if (row->Type == TYPE_JOB) {
+ nwr = AddPair(row, nodes[i - 1].Key, type);
+ } else if (row->Type == TYPE_JAR) {
+ AddArrayValue(row, (nwr = NewVal(type)));
+ } else {
+ strcpy(g->Message, "Wrong type when writing new row");
+ nwr = NULL;
+ } // endif's
+
+ row = nwr;
+ } // endfor i
+
+ break;
+ } // endelse
+
+ } // endfor i
+
+ return row;
+} // end of GetRow
+
+
+/* -------------------------- Class BSONDEF -------------------------- */
+
+BSONDEF::BSONDEF(void)
+{
+ Jmode = MODE_OBJECT;
+ Objname = NULL;
+ Xcol = NULL;
+ Pretty = 2;
+ Limit = 1;
+ Base = 0;
+ Strict = false;
+ Sep = '.';
+ Uri = NULL;
+ Collname = Options = Filter = NULL;
+ Pipe = false;
+ Driver = NULL;
+ Version = 0;
+ Wrapname = NULL;
+} // end of BSONDEF constructor
+
+/***********************************************************************/
+/* DefineAM: define specific AM block values. */
+/***********************************************************************/
+bool BSONDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff)
+{
+ G = g;
+ Schema = GetStringCatInfo(g, "DBname", Schema);
+ Jmode = (JMODE)GetIntCatInfo("Jmode", MODE_OBJECT);
+ Objname = GetStringCatInfo(g, "Object", NULL);
+ Xcol = GetStringCatInfo(g, "Expand", NULL);
+ Pretty = GetIntCatInfo("Pretty", 2);
+ Limit = GetIntCatInfo("Limit", 10);
+ Base = GetIntCatInfo("Base", 0) ? 1 : 0;
+ Sep = *GetStringCatInfo(g, "Separator", ".");
+ Accept = GetBoolCatInfo("Accept", false);
+
+ // Don't use url as MONGO uri when called from REST
+ if (stricmp(am, "REST") && (Uri = GetStringCatInfo(g, "Connect", NULL))) {
+#if defined(JAVA_SUPPORT) || defined(CMGO_SUPPORT)
+ Collname = GetStringCatInfo(g, "Name",
+ (Catfunc & (FNC_TABLE | FNC_COL)) ? NULL : Name);
+ Collname = GetStringCatInfo(g, "Tabname", Collname);
+ Options = GetStringCatInfo(g, "Colist", NULL);
+ Filter = GetStringCatInfo(g, "Filter", NULL);
+ Pipe = GetBoolCatInfo("Pipeline", false);
+ Driver = GetStringCatInfo(g, "Driver", NULL);
+ Version = GetIntCatInfo("Version", 3);
+ Pretty = 0;
+#if defined(JAVA_SUPPORT)
+ if (Version == 2)
+ Wrapname = GetStringCatInfo(g, "Wrapper", "Mongo2Interface");
+ else
+ Wrapname = GetStringCatInfo(g, "Wrapper", "Mongo3Interface");
+#endif // JAVA_SUPPORT
+#else // !MONGO_SUPPORT
+ sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "MONGO");
+ return true;
+#endif // !MONGO_SUPPORT
+ } // endif Uri
+
+ return DOSDEF::DefineAM(g, (Uri ? "XMGO" : "DOS"), poff);
+} // end of DefineAM
+
+/***********************************************************************/
+/* GetTable: makes a new Table Description Block. */
+/***********************************************************************/
+PTDB BSONDEF::GetTable(PGLOBAL g, MODE m)
+{
+ if (trace(1))
+ htrc("BSON GetTable Pretty=%d Uri=%s\n", Pretty, SVP(Uri));
+
+ if (Catfunc == FNC_COL)
+ return new(g)TDBBCL(this);
+
+ PTDBASE tdbp;
+ PTXF txfp = NULL;
+
+ // JSN not used for pretty=1 for insert or delete
+ if (Pretty <= 0 || (Pretty == 1 && (m == MODE_READ || m == MODE_UPDATE))) {
+ USETEMP tmp = UseTemp();
+ bool map = Mapped && Pretty >= 0 && m != MODE_INSERT &&
+ !(tmp != TMP_NO && m == MODE_UPDATE) &&
+ !(tmp == TMP_FORCE && (m == MODE_UPDATE || m == MODE_DELETE));
+
+ if (Lrecl) {
+ // Allocate the parse work memory
+ G = PlugInit(NULL, (size_t)Lrecl * (Pretty < 0 ? 2 : 4));
+ } else {
+ strcpy(g->Message, "LRECL is not defined");
+ return NULL;
+ } // endif Lrecl
+
+ if (Pretty < 0) { // BJsonfile
+ txfp = new(g) BINFAM(this);
+ } else if (Uri) {
+ if (Driver && toupper(*Driver) == 'C') {
+#if defined(CMGO_SUPPORT)
+ txfp = new(g) CMGFAM(this);
+#else
+ sprintf(g->Message, "Mongo %s Driver not available", "C");
+ return NULL;
+#endif
+ } else if (Driver && toupper(*Driver) == 'J') {
+#if defined(JAVA_SUPPORT)
+ txfp = new(g) JMGFAM(this);
+#else
+ sprintf(g->Message, "Mongo %s Driver not available", "Java");
+ return NULL;
+#endif
+ } else { // Driver not specified
+#if defined(CMGO_SUPPORT)
+ txfp = new(g) CMGFAM(this);
+#elif defined(JAVA_SUPPORT)
+ txfp = new(g) JMGFAM(this);
+#else // !MONGO_SUPPORT
+ sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "MONGO");
+ return NULL;
+#endif // !MONGO_SUPPORT
+ } // endif Driver
+
+ } else if (Zipped) {
+#if defined(ZIP_SUPPORT)
+ if (m == MODE_READ || m == MODE_ANY || m == MODE_ALTER) {
+ txfp = new(g) UNZFAM(this);
+ } else if (m == MODE_INSERT) {
+ txfp = new(g) ZIPFAM(this);
+ } else {
+ strcpy(g->Message, "UPDATE/DELETE not supported for ZIP");
+ return NULL;
+ } // endif's m
+#else // !ZIP_SUPPORT
+ sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "ZIP");
+ return NULL;
+#endif // !ZIP_SUPPORT
+ } else if (Compressed) {
+#if defined(GZ_SUPPORT)
+ if (Compressed == 1)
+ txfp = new(g) GZFAM(this);
+ else
+ txfp = new(g) ZLBFAM(this);
+#else // !GZ_SUPPORT
+ sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "GZ");
+ return NULL;
+#endif // !GZ_SUPPORT
+ } else if (map) {
+ txfp = new(g) MAPFAM(this);
+ } else
+ txfp = new(g) DOSFAM(this);
+
+ // Txfp must be set for TDBBSN
+ tdbp = new(g) TDBBSN(g, this, txfp);
+ } else {
+ if (Zipped) {
+#if defined(ZIP_SUPPORT)
+ if (m == MODE_READ || m == MODE_ANY || m == MODE_ALTER) {
+ txfp = new(g) UNZFAM(this);
+ } else if (m == MODE_INSERT) {
+ strcpy(g->Message, "INSERT supported only for zipped JSON when pretty=0");
+ return NULL;
+ } else {
+ strcpy(g->Message, "UPDATE/DELETE not supported for ZIP");
+ return NULL;
+ } // endif's m
+#else // !ZIP_SUPPORT
+ sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "ZIP");
+ return NULL;
+#endif // !ZIP_SUPPORT
+ } else
+ txfp = new(g) MAPFAM(this);
+
+ tdbp = new(g) TDBBSON(g, this, txfp);
+ } // endif Pretty
+
+ if (Multiple)
+ tdbp = new(g) TDBMUL(tdbp);
+
+ return tdbp;
+} // end of GetTable
+
+/* --------------------------- Class TDBBSN -------------------------- */
+
+/***********************************************************************/
+/* Implementation of the TDBBSN class (Pretty < 2) */
+/***********************************************************************/
+TDBBSN::TDBBSN(PGLOBAL g, PBDEF tdp, PTXF txfp) : TDBDOS(tdp, txfp)
+{
+ Bp = new(g) BTUTIL(tdp->G, this);
+ Top = NULL;
+ Row = NULL;
+ Colp = NULL;
+
+ if (tdp) {
+ Jmode = tdp->Jmode;
+ Objname = tdp->Objname;
+ Xcol = tdp->Xcol;
+ Limit = tdp->Limit;
+ Pretty = tdp->Pretty;
+ B = tdp->Base ? 1 : 0;
+ Sep = tdp->Sep;
+ Strict = tdp->Strict;
+ } else {
+ Jmode = MODE_OBJECT;
+ Objname = NULL;
+ Xcol = NULL;
+ Limit = 1;
+ Pretty = 0;
+ B = 0;
+ Sep = '.';
+ Strict = false;
+ } // endif tdp
+
+ Fpos = -1;
+ N = M = 0;
+ NextSame = 0;
+ SameRow = 0;
+ Xval = -1;
+ Comma = false;
+ Bp->SetPretty(Pretty);
+} // end of TDBBSN standard constructor
+
+TDBBSN::TDBBSN(TDBBSN* tdbp) : TDBDOS(NULL, tdbp)
+{
+ Bp = tdbp->Bp;
+ Top = tdbp->Top;
+ Row = tdbp->Row;
+ Colp = tdbp->Colp;
+ Jmode = tdbp->Jmode;
+ Objname = tdbp->Objname;
+ Xcol = tdbp->Xcol;
+ Fpos = tdbp->Fpos;
+ N = tdbp->N;
+ M = tdbp->M;
+ Limit = tdbp->Limit;
+ NextSame = tdbp->NextSame;
+ SameRow = tdbp->SameRow;
+ Xval = tdbp->Xval;
+ B = tdbp->B;
+ Sep = tdbp->Sep;
+ Pretty = tdbp->Pretty;
+ Strict = tdbp->Strict;
+ Comma = tdbp->Comma;
+} // end of TDBBSN copy constructor
+
+// Used for update
+PTDB TDBBSN::Clone(PTABS t)
+{
+ PTDB tp;
+ PBSCOL cp1, cp2;
+ PGLOBAL g = t->G;
+
+ tp = new(g) TDBBSN(this);
+
+ for (cp1 = (PBSCOL)Columns; cp1; cp1 = (PBSCOL)cp1->GetNext()) {
+ cp2 = new(g) BSONCOL(cp1, tp); // Make a copy
+ NewPointer(t, cp1, cp2);
+ } // endfor cp1
+
+ return tp;
+} // end of Clone
+
+/***********************************************************************/
+/* Allocate JSN column description block. */
+/***********************************************************************/
+PCOL TDBBSN::MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n)
+{
+ PBSCOL colp = new(g) BSONCOL(g, cdp, this, cprec, n);
+
+ return (colp->ParseJpath(g)) ? NULL : colp;
+} // end of MakeCol
+
+/***********************************************************************/
+/* InsertSpecialColumn: Put a special column ahead of the column list.*/
+/***********************************************************************/
+PCOL TDBBSN::InsertSpecialColumn(PCOL colp)
+{
+ if (!colp->IsSpecial())
+ return NULL;
+
+ //if (Xcol && ((SPCBLK*)colp)->GetRnm())
+ // colp->SetKey(0); // Rownum is no more a key
+
+ colp->SetNext(Columns);
+ Columns = colp;
+ return colp;
+} // end of InsertSpecialColumn
+
+/***********************************************************************/
+/* JSON Cardinality: returns table size in number of rows. */
+/***********************************************************************/
+int TDBBSN::Cardinality(PGLOBAL g)
+{
+ if (!g)
+ return 0;
+ else if (Cardinal < 0) {
+ Cardinal = TDBDOS::Cardinality(g);
+
+ } // endif Cardinal
+
+ return Cardinal;
+} // end of Cardinality
+
+/***********************************************************************/
+/* JSON GetMaxSize: returns file size estimate in number of lines. */
+/***********************************************************************/
+int TDBBSN::GetMaxSize(PGLOBAL g)
+{
+ if (MaxSize < 0)
+ MaxSize = TDBDOS::GetMaxSize(g) * ((Xcol) ? Limit : 1);
+
+ return MaxSize;
+} // end of GetMaxSize
+
+/***********************************************************************/
+/* JSON EstimatedLength. Returns an estimated minimum line length. */
+/***********************************************************************/
+int TDBBSN::EstimatedLength(void)
+{
+ if (AvgLen <= 0)
+ return (Lrecl ? Lrecl : 1024) / 8; // TODO: make it better
+ else
+ return AvgLen;
+
+} // end of EstimatedLength
+
+/***********************************************************************/
+/* OpenDB: Data Base open routine for JSN access method. */
+/***********************************************************************/
+bool TDBBSN::OpenDB(PGLOBAL g)
+{
+ TUSE use = Use;
+
+ if (Use == USE_OPEN) {
+ /*******************************************************************/
+ /* Table already open replace it at its beginning. ??? */
+ /*******************************************************************/
+ Fpos = -1;
+ NextSame = 0;
+ SameRow = 0;
+ } // endif Use
+
+ /*********************************************************************/
+ /* Open according to logical input/output mode required. */
+ /*********************************************************************/
+ if (TDBDOS::OpenDB(g))
+ return true;
+
+ if (use == USE_OPEN)
+ return false;
+
+ if (Pretty < 0) {
+ /*********************************************************************/
+ /* Binary BJSON table. */
+ /*********************************************************************/
+ xtrc(1, "JSN OpenDB: tdbp=%p tdb=R%d use=%d mode=%d\n",
+ this, Tdb_No, Use, Mode);
+
+ // Lrecl is Ok
+ size_t linelen = Lrecl;
+ MODE mode = Mode;
+
+ // Buffer must be allocated in G->Sarea
+ Mode = MODE_ANY;
+ Txfp->AllocateBuffer(Bp->G);
+ Mode = mode;
+
+ if (Mode == MODE_INSERT)
+ Bp->SubSet(true);
+ else
+ Bp->MemSave();
+
+ To_Line = Txfp->GetBuf();
+ memset(To_Line, 0, linelen);
+ xtrc(1, "OpenJSN: R%hd mode=%d To_Line=%p\n", Tdb_No, Mode, To_Line);
+ } // endif Pretty
+
+ /***********************************************************************/
+ /* First opening. */
+ /***********************************************************************/
+ if (Mode == MODE_INSERT) {
+ int type;
+
+ switch (Jmode) {
+ case MODE_OBJECT: type = TYPE_JOB; break;
+ case MODE_ARRAY: type = TYPE_JAR; break;
+ case MODE_VALUE: type = TYPE_JVAL; break;
+ default:
+ sprintf(g->Message, "Invalid Jmode %d", Jmode);
+ return true;
+ } // endswitch Jmode
+
+ Top = Bp->MakeTopTree(g, type);
+ Bp->MemSave();
+ } // endif Mode
+
+ if (Xcol)
+ To_Filter = NULL; // Not compatible
+
+ return false;
+} // end of OpenDB
+
+/***********************************************************************/
+/* SkipHeader: Physically skip first header line if applicable. */
+/* This is called from TDBDOS::OpenDB and must be executed before */
+/* Kindex construction if the file is accessed using an index. */
+/***********************************************************************/
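+// (With Pretty equal to 1 the whole file is a single JSON array: its first
+// line holds the opening '[' and is skipped here, and ReadDB later stops
+// on the closing "]" line.)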
+bool TDBBSN::SkipHeader(PGLOBAL g)
+{
+ int len = GetFileLength(g);
+ bool rc = false;
+
+#if defined(_DEBUG)
+ if (len < 0)
+ return true;
+#endif // _DEBUG
+
+ if (Pretty == 1) {
+ if (Mode == MODE_INSERT || Mode == MODE_DELETE) {
+      // Insert and Delete modes are no longer handled here
+ DBUG_ASSERT(false);
+ } else if (len > 0) // !Insert && !Delete
+ rc = (Txfp->SkipRecord(g, false) == RC_FX || Txfp->RecordPos(g));
+
+ } // endif Pretty
+
+ return rc;
+} // end of SkipHeader
+
+/***********************************************************************/
+/* ReadDB: Data Base read routine for JSN access method. */
+/***********************************************************************/
+int TDBBSN::ReadDB(PGLOBAL g)
+{
+ int rc;
+
+ N++;
+
+ if (NextSame) {
+ SameRow = NextSame;
+ NextSame = 0;
+ M++;
+ return RC_OK;
+ } else if ((rc = TDBDOS::ReadDB(g)) == RC_OK) {
+ if (!IsRead() && ((rc = ReadBuffer(g)) != RC_OK))
+ return rc; // Deferred reading failed
+
+ if (Pretty >= 0) {
+ // Recover the memory used for parsing
+ Bp->SubSet();
+
+ if ((Row = Bp->ParseLine(g, Pretty, Comma))) {
+ Top = Row;
+ Row = Bp->FindRow(g);
+ SameRow = 0;
+ Fpos++;
+ M = 1;
+ rc = RC_OK;
+ } else if (Pretty != 1 || strcmp(To_Line, "]")) {
+ Bp->GetMsg(g);
+ rc = RC_FX;
+ } else
+ rc = RC_EF;
+
+ } else { // Here we get a movable Json binary tree
+ Bp->MemSet(((BINFAM*)Txfp)->Recsize); // Useful when updating
+ Row = Top = (PBVAL)To_Line;
+ Row = Bp->FindRow(g);
+ SameRow = 0;
+ Fpos++;
+ M = 1;
+ rc = RC_OK;
+ } // endif Pretty
+
+ } // endif ReadDB
+
+ return rc;
+} // end of ReadDB
+
+/***********************************************************************/
+/* PrepareWriting: Prepare the line for WriteDB. */
+/***********************************************************************/
+bool TDBBSN::PrepareWriting(PGLOBAL g)
+{
+ if (Pretty >= 0) {
+ PSZ s;
+
+// if (!(Top = Bp->MakeTopTree(g, Row->Type)))
+// return true;
+
+ if ((s = Bp->SerialVal(g, Top, Pretty))) {
+ if (Comma)
+ strcat(s, ",");
+
+ if ((signed)strlen(s) > Lrecl) {
+ strncpy(To_Line, s, Lrecl);
+ sprintf(g->Message, "Line truncated (lrecl=%d)", Lrecl);
+ return PushWarning(g, this);
+ } else
+ strcpy(To_Line, s);
+
+ return false;
+ } else
+ return true;
+ } else
+ ((BINFAM*)Txfp)->Recsize = ((size_t)PlugSubAlloc(Bp->G, NULL, 0)
+ - (size_t)To_Line);
+ return false;
+} // end of PrepareWriting
+
+/***********************************************************************/
+/* WriteDB: Data Base write routine for JSON access method. */
+/***********************************************************************/
+int TDBBSN::WriteDB(PGLOBAL g) {
+ int rc = TDBDOS::WriteDB(g);
+
+ Bp->SubSet();
+ Bp->Clear(Row);
+ return rc;
+} // end of WriteDB
+
+/***********************************************************************/
+/* Data Base close routine for JSON access method. */
+/***********************************************************************/
+void TDBBSN::CloseDB(PGLOBAL g)
+{
+ TDBDOS::CloseDB(g);
+ Bp->G = PlugExit(Bp->G);
+} // end of CloseDB
+
+/* ---------------------------- BSONCOL ------------------------------ */
+
+/***********************************************************************/
+/* BSONCOL public constructor. */
+/***********************************************************************/
+BSONCOL::BSONCOL(PGLOBAL g, PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i)
+ : DOSCOL(g, cdp, tdbp, cprec, i, "DOS")
+{
+ Tbp = (TDBBSN*)(tdbp->GetOrig() ? tdbp->GetOrig() : tdbp);
+ Cp = new(g) BCUTIL(((PBDEF)Tbp->To_Def)->G, this, Tbp);
+ Jpath = cdp->GetFmt();
+ MulVal = NULL;
+ Nodes = NULL;
+ Nod = 0;
+ Sep = Tbp->Sep;
+ Xnod = -1;
+ Xpd = false;
+ Parsed = false;
+ Warned = false;
+} // end of BSONCOL constructor
+
+/***********************************************************************/
+/* BSONCOL constructor used for copying columns. */
+/* tdbp is the pointer to the new table descriptor. */
+/***********************************************************************/
+BSONCOL::BSONCOL(BSONCOL* col1, PTDB tdbp) : DOSCOL(col1, tdbp)
+{
+ Tbp = col1->Tbp;
+ Cp = col1->Cp;
+ Jpath = col1->Jpath;
+ MulVal = col1->MulVal;
+ Nodes = col1->Nodes;
+ Nod = col1->Nod;
+ Sep = col1->Sep;
+ Xnod = col1->Xnod;
+ Xpd = col1->Xpd;
+ Parsed = col1->Parsed;
+ Warned = col1->Warned;
+} // end of BSONCOL copy constructor
+
+/***********************************************************************/
+/* SetBuffer: prepare a column block for write operation. */
+/***********************************************************************/
+bool BSONCOL::SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check)
+{
+ if (DOSCOL::SetBuffer(g, value, ok, check))
+ return true;
+
+ // Parse the json path
+ if (ParseJpath(g))
+ return true;
+
+ Tbp = (TDBBSN*)To_Tdb;
+ return false;
+} // end of SetBuffer
+
+/***********************************************************************/
+/* Check whether this object is expanded. */
+/***********************************************************************/
+bool BSONCOL::CheckExpand(PGLOBAL g, int i, PSZ nm, bool b)
+{
+ if ((Tbp->Xcol && nm && !strcmp(nm, Tbp->Xcol) &&
+ (Tbp->Xval < 0 || Tbp->Xval == i)) || Xpd) {
+ Xpd = true; // Expandable object
+ Nodes[i].Op = OP_EXP;
+ } else if (b) {
+ strcpy(g->Message, "Cannot expand more than one branch");
+ return true;
+ } // endif Xcol
+
+ return false;
+} // end of CheckExpand
+
+/***********************************************************************/
+/* Analyse array processing options. */
+/***********************************************************************/
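+// Illustration of the specifications handled below (B is the array index
+// base): "[2]" keeps the third value when B is 0, "[+]" sums numeric
+// values, "[x]" multiplies them, "[!]" averages them, "[#]" returns their
+// number, "[*]" expands the array into several rows (up to Limit), and a
+// quoted spec such as ["-"] concatenates the values separated by "-".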
+bool BSONCOL::SetArrayOptions(PGLOBAL g, char* p, int i, PSZ nm)
+{
+ int n;
+ bool dg = true, b = false;
+ PJNODE jnp = &Nodes[i];
+
+ //if (*p == '[') p++; // Old syntax .[ or :[
+ n = (int)strlen(p);
+
+ if (*p) {
+ if (p[n - 1] == ']') {
+ p[--n] = 0;
+ } else if (!IsNum(p)) {
+ // Wrong array specification
+ sprintf(g->Message, "Invalid array specification %s for %s", p, Name);
+ return true;
+ } // endif p
+
+ } else
+ b = true;
+
+ // To check whether a numeric Rank was specified
+ dg = IsNum(p);
+
+ if (!n) {
+ // Default specifications
+ if (CheckExpand(g, i, nm, false))
+ return true;
+ else if (jnp->Op != OP_EXP) {
+ if (b) {
+ // Return 1st value (B is the index base)
+ jnp->Rank = Tbp->B;
+ jnp->Op = OP_EQ;
+ } else if (!Value->IsTypeNum()) {
+ jnp->CncVal = AllocateValue(g, (void*)", ", TYPE_STRING);
+ jnp->Op = OP_CNC;
+ } else
+ jnp->Op = OP_ADD;
+
+ } // endif OP
+
+ } else if (dg) {
+ // Return nth value
+ jnp->Rank = atoi(p) - Tbp->B;
+ jnp->Op = OP_EQ;
+ } else if (n == 1) {
+ // Set the Op value;
+ if (Sep == ':')
+ switch (*p) {
+ case '*': *p = 'x'; break;
+ case 'x':
+ case 'X': *p = '*'; break; // Expand this array
+ default: break;
+ } // endswitch p
+
+ switch (*p) {
+ case '+': jnp->Op = OP_ADD; break;
+ case 'x': jnp->Op = OP_MULT; break;
+ case '>': jnp->Op = OP_MAX; break;
+ case '<': jnp->Op = OP_MIN; break;
+ case '!': jnp->Op = OP_SEP; break; // Average
+ case '#': jnp->Op = OP_NUM; break;
+ case '*': // Expand this array
+ if (!Tbp->Xcol && nm) {
+ Xpd = true;
+ jnp->Op = OP_EXP;
+ Tbp->Xval = i;
+ Tbp->Xcol = nm;
+ } else if (CheckExpand(g, i, nm, true))
+ return true;
+
+ break;
+ default:
+ sprintf(g->Message,
+ "Invalid function specification %c for %s", *p, Name);
+ return true;
+ } // endswitch *p
+
+ } else if (*p == '"' && p[n - 1] == '"') {
+ // This is a concat specification
+ jnp->Op = OP_CNC;
+
+ if (n > 2) {
+ // Set concat intermediate string
+ p[n - 1] = 0;
+ jnp->CncVal = AllocateValue(g, p + 1, TYPE_STRING);
+ } // endif n
+
+ } else {
+ sprintf(g->Message, "Wrong array specification for %s", Name);
+ return true;
+ } // endif's
+
+ // For calculated arrays, a local Value must be used
+ switch (jnp->Op) {
+ case OP_NUM:
+ jnp->Valp = AllocateValue(g, TYPE_INT);
+ break;
+ case OP_ADD:
+ case OP_MULT:
+ case OP_SEP:
+ if (!IsTypeChar(Buf_Type))
+ jnp->Valp = AllocateValue(g, Buf_Type, 0, GetPrecision());
+ else
+ jnp->Valp = AllocateValue(g, TYPE_DOUBLE, 0, 2);
+
+ break;
+ case OP_MIN:
+ case OP_MAX:
+ jnp->Valp = AllocateValue(g, Buf_Type, Long, GetPrecision());
+ break;
+ case OP_CNC:
+ if (IsTypeChar(Buf_Type))
+ jnp->Valp = AllocateValue(g, TYPE_STRING, Long, GetPrecision());
+ else
+ jnp->Valp = AllocateValue(g, TYPE_STRING, 512);
+
+ break;
+ default:
+ break;
+ } // endswitch Op
+
+ if (jnp->Valp)
+ MulVal = AllocateValue(g, jnp->Valp);
+
+ return false;
+} // end of SetArrayOptions
+
+/***********************************************************************/
+/* Parse the Jpath information passed, if any.                        */
+/* This information can be specified in the Fieldfmt column option    */
+/* when creating the table. It indicates the position of the node     */
+/* corresponding to that column.                                      */
+/***********************************************************************/
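+// For instance, with the default '.' separator, a Fieldfmt value such as
+// '$.authors[0].name' is split below into the nodes "authors" (taking
+// array element 0, relative to the index base B) and "name".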
+bool BSONCOL::ParseJpath(PGLOBAL g)
+{
+ char* p, * p1 = NULL, * p2 = NULL, * pbuf = NULL;
+ int i;
+ bool a;
+
+ if (Parsed)
+ return false; // Already done
+ else if (InitValue(g))
+ return true;
+ else if (!Jpath)
+ Jpath = Name;
+
+ if (To_Tdb->GetOrig()) {
+ // This is an updated column, get nodes from origin
+ for (PBSCOL colp = (PBSCOL)Tbp->GetColumns(); colp;
+ colp = (PBSCOL)colp->GetNext())
+ if (!stricmp(Name, colp->GetName())) {
+ Nod = colp->Nod;
+ Nodes = colp->Nodes;
+ Xpd = colp->Xpd;
+ goto fin;
+ } // endif Name
+
+ sprintf(g->Message, "Cannot parse updated column %s", Name);
+ return true;
+ } // endif To_Orig
+
+ pbuf = PlugDup(g, Jpath);
+ if (*pbuf == '$') pbuf++;
+ if (*pbuf == Sep) pbuf++;
+ if (*pbuf == '[') p1 = pbuf++;
+
+ // Estimate the required number of nodes
+ for (i = 0, p = pbuf; (p = NextChr(p, Sep)); i++, p++)
+ Nod++; // One path node found
+
+ Nodes = (PJNODE)PlugSubAlloc(g, NULL, (++Nod) * sizeof(JNODE));
+ memset(Nodes, 0, (Nod) * sizeof(JNODE));
+
+ // Analyze the Jpath for this column
+ for (i = 0, p = pbuf; p && i < Nod; i++, p = (p2 ? p2 : NULL)) {
+ a = (p1 != NULL);
+ p1 = strchr(p, '[');
+ p2 = strchr(p, Sep);
+
+ if (!p2)
+ p2 = p1;
+ else if (p1) {
+ if (p1 < p2)
+ p2 = p1;
+ else if (p1 == p2 + 1)
+ *p2++ = 0; // Old syntax .[ or :[
+ else
+ p1 = NULL;
+
+ } // endif p1
+
+ if (p2)
+ *p2++ = 0;
+
+ // Jpath must be explicit
+ if (a || *p == 0 || *p == '[' || IsNum(p)) {
+ // Analyse intermediate array processing
+ if (SetArrayOptions(g, p, i, Nodes[i - 1].Key))
+ return true;
+
+ } else if (*p == '*') {
+ // Return JSON
+ Nodes[i].Op = OP_XX;
+ } else {
+ Nodes[i].Key = p;
+ Nodes[i].Op = OP_EXIST;
+ } // endif's
+
+ } // endfor i, p
+
+ Nod = i;
+
+fin:
+ MulVal = AllocateValue(g, Value);
+ Parsed = true;
+ return false;
+} // end of ParseJpath
+
+/***********************************************************************/
+/* Get Jpath converted to Mongo path. */
+/***********************************************************************/
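+// Rough sketch of the conversion done below: a Jpath of
+// "$.store.book[1].title" gives the Mongo path "store.book.1.title",
+// or "store.book.title" when proj is true (array ranks are dropped
+// from projection lists).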
+PSZ BSONCOL::GetJpath(PGLOBAL g, bool proj)
+{
+ if (Jpath) {
+ char* p1, * p2, * mgopath;
+ int i = 0;
+
+ if (strcmp(Jpath, "*")) {
+ p1 = Jpath;
+ if (*p1 == '$') p1++;
+ if (*p1 == '.') p1++;
+ mgopath = PlugDup(g, p1);
+ } else
+ return NULL;
+
+ for (p1 = p2 = mgopath; *p1; p1++)
+ if (i) { // Inside []
+ if (isdigit(*p1)) {
+ if (!proj)
+ *p2++ = *p1;
+
+ } else if (*p1 == ']' && i == 1) {
+ if (proj && p1[1] == '.')
+ p1++;
+
+ i = 0;
+ } else if (*p1 == '.' && i == 2) {
+ if (!proj)
+ *p2++ = '.';
+
+ i = 0;
+ } else if (!proj)
+ return NULL;
+
+ } else switch (*p1) {
+ case ':':
+ case '.':
+ if (isdigit(p1[1]))
+ i = 2;
+
+ *p2++ = '.';
+ break;
+ case '[':
+ if (*(p2 - 1) != '.')
+ *p2++ = '.';
+
+ i = 1;
+ break;
+ case '*':
+ if (*(p2 - 1) == '.' && !*(p1 + 1)) {
+ p2--; // Suppress last :*
+ break;
+ } // endif p2
+
+ default:
+ *p2++ = *p1;
+ break;
+ } // endswitch p1;
+
+ *p2 = 0;
+ return mgopath;
+ } else
+ return NULL;
+
+} // end of GetJpath
+
+/***********************************************************************/
+/* ReadColumn: */
+/***********************************************************************/
+void BSONCOL::ReadColumn(PGLOBAL g)
+{
+ if (!Tbp->SameRow || Xnod >= Tbp->SameRow)
+ Value->SetValue_pval(Cp->GetColumnValue(g, Tbp->Row, 0));
+
+#if defined(DEVELOPMENT)
+ if (Xpd && Value->IsNull() && !((PBDEF)Tbp->To_Def)->Accept)
+ htrc("Null expandable JSON value for column %s\n", Name);
+#endif // DEVELOPMENT
+
+ // Set null when applicable
+ if (!Nullable)
+ Value->SetNull(false);
+
+} // end of ReadColumn
+
+/***********************************************************************/
+/* WriteColumn: */
+/***********************************************************************/
+void BSONCOL::WriteColumn(PGLOBAL g)
+{
+ if (Xpd && Tbp->Pretty < 2) {
+ strcpy(g->Message, "Cannot write expanded column when Pretty is not 2");
+ throw 666;
+ } // endif Xpd
+
+ /*********************************************************************/
+ /* Check whether this node must be written. */
+ /*********************************************************************/
+ if (Value != To_Val)
+ Value->SetValue_pval(To_Val, FALSE); // Convert the updated value
+
+ /*********************************************************************/
+ /* On INSERT Null values are represented by no node. */
+ /*********************************************************************/
+ if (Value->IsNull() && Tbp->Mode == MODE_INSERT)
+ return;
+
+ PBVAL jsp, row = Cp->GetRow(g);
+
+ if (row) switch (Buf_Type) {
+ case TYPE_STRING:
+ case TYPE_DATE:
+ case TYPE_INT:
+ case TYPE_TINY:
+ case TYPE_SHORT:
+ case TYPE_BIGINT:
+ case TYPE_DOUBLE:
+ if (Buf_Type == TYPE_STRING && Nodes[Nod - 1].Op == OP_XX) {
+ char *s = Value->GetCharValue();
+
+ if (!(jsp = Cp->ParseJson(g, s, strlen(s)))) {
+ strcpy(g->Message, s);
+ throw 666;
+ } // endif jsp
+
+ switch (row->Type) {
+ case TYPE_JAR:
+ if (Nod > 1 && Nodes[Nod - 2].Op == OP_EQ)
+ Cp->SetArrayValue(row, jsp, Nodes[Nod - 2].Rank);
+ else
+ Cp->AddArrayValue(row, jsp);
+
+ break;
+ case TYPE_JOB:
+ if (Nod > 1 && Nodes[Nod - 2].Key)
+ Cp->SetKeyValue(row, jsp, Nodes[Nod - 2].Key);
+
+ break;
+ case TYPE_JVAL:
+ default:
+ Cp->SetValueVal(row, jsp);
+ } // endswitch Type
+
+ break;
+ } else
+ jsp = Cp->NewVal(Value);
+
+ switch (row->Type) {
+ case TYPE_JAR:
+ if (Nodes[Nod - 1].Op == OP_EQ)
+ Cp->SetArrayValue(row, jsp, Nodes[Nod - 1].Rank);
+ else
+ Cp->AddArrayValue(row, jsp);
+
+ break;
+ case TYPE_JOB:
+ if (Nodes[Nod - 1].Key)
+ Cp->SetKeyValue(row, jsp, Nodes[Nod - 1].Key);
+
+ break;
+ case TYPE_JVAL:
+ default:
+ Cp->SetValueVal(row, jsp);
+ } // endswitch Type
+
+ break;
+ default: // ??????????
+ sprintf(g->Message, "Invalid column type %d", Buf_Type);
+ } // endswitch Type
+
+} // end of WriteColumn
+
+/* -------------------------- Class TDBBSON -------------------------- */
+
+/***********************************************************************/
+/* Implementation of the TDBBSON class. */
+/***********************************************************************/
+TDBBSON::TDBBSON(PGLOBAL g, PBDEF tdp, PTXF txfp) : TDBBSN(g, tdp, txfp)
+{
+ Docp = NULL;
+ Multiple = tdp->Multiple;
+ Done = Changed = false;
+ Bp->SetPretty(2);
+} // end of TDBBSON standard constructor
+
+TDBBSON::TDBBSON(PBTDB tdbp) : TDBBSN(tdbp)
+{
+ Docp = tdbp->Docp;
+ Multiple = tdbp->Multiple;
+ Done = tdbp->Done;
+ Changed = tdbp->Changed;
+} // end of TDBBSON copy constructor
+
+// Used for update
+PTDB TDBBSON::Clone(PTABS t)
+{
+ PTDB tp;
+ PBSCOL cp1, cp2;
+ PGLOBAL g = t->G;
+
+ tp = new(g) TDBBSON(this);
+
+ for (cp1 = (PBSCOL)Columns; cp1; cp1 = (PBSCOL)cp1->GetNext()) {
+ cp2 = new(g) BSONCOL(cp1, tp); // Make a copy
+ NewPointer(t, cp1, cp2);
+ } // endfor cp1
+
+ return tp;
+} // end of Clone
+
+/***********************************************************************/
+/* Make the document tree from the object path. */
+/***********************************************************************/
+int TDBBSON::MakeNewDoc(PGLOBAL g)
+{
+ // Create a void table that will be populated
+ Docp = Bp->NewVal(TYPE_JAR);
+
+ if (!(Top = Bp->MakeTopTree(g, TYPE_JAR)))
+ return RC_FX;
+
+ Docp = Row;
+ Done = true;
+ return RC_OK;
+} // end of MakeNewDoc
+
+/***********************************************************************/
+/* Make the document tree from a file. */
+/***********************************************************************/
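+// Sketch of the lookup done here: with an Objname such as "store.books",
+// the parsed file is walked key by key and the array found under
+// "store" -> "books" becomes Docp, the document array used as the table.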
+int TDBBSON::MakeDocument(PGLOBAL g)
+{
+ char *p, *p1, *p2, *memory, *objpath, *key = NULL;
+ int i = 0;
+ size_t len;
+ my_bool a;
+ MODE mode = Mode;
+ PBVAL jsp;
+ PBVAL objp = NULL;
+ PBVAL arp = NULL;
+ PBVAL val = NULL;
+
+ if (Done)
+ return RC_OK;
+
+ /*********************************************************************/
+ /* Create the mapping file object in mode read. */
+ /*********************************************************************/
+ Mode = MODE_READ;
+
+ if (!Txfp->OpenTableFile(g)) {
+ PFBLOCK fp = Txfp->GetTo_Fb();
+
+ if (fp) {
+ len = fp->Length;
+ memory = fp->Memory;
+ } else {
+ Mode = mode; // Restore saved Mode
+ return MakeNewDoc(g);
+ } // endif fp
+
+ } else
+ return RC_FX;
+
+ /*********************************************************************/
+ /* Parse the json file and allocate its tree structure. */
+ /*********************************************************************/
+ g->Message[0] = 0;
+ jsp = Top = Bp->ParseJson(g, memory, len);
+ Txfp->CloseTableFile(g, false);
+ Mode = mode; // Restore saved Mode
+
+ if (!jsp && g->Message[0])
+ return RC_FX;
+
+ if ((objpath = PlugDup(g, Objname))) {
+ if (*objpath == '$') objpath++;
+ if (*objpath == '.') objpath++;
+ p1 = (*objpath == '[') ? objpath++ : NULL;
+
+ /*********************************************************************/
+ /* Find the table in the tree structure. */
+ /*********************************************************************/
+ for (p = objpath; jsp && p; p = (p2 ? p2 : NULL)) {
+ a = (p1 != NULL);
+ p1 = strchr(p, '[');
+ p2 = strchr(p, '.');
+
+ if (!p2)
+ p2 = p1;
+ else if (p1) {
+ if (p1 < p2)
+ p2 = p1;
+ else if (p1 == p2 + 1)
+ *p2++ = 0; // Old syntax .[
+ else
+ p1 = NULL;
+
+ } // endif p1
+
+ if (p2)
+ *p2++ = 0;
+
+ if (!a && *p && *p != '[' && !IsNum(p)) {
+ // obj is a key
+ if (jsp->Type != TYPE_JOB) {
+ strcpy(g->Message, "Table path does not match the json file");
+ return RC_FX;
+ } // endif Type
+
+ key = p;
+ objp = jsp;
+ arp = NULL;
+ val = Bp->GetKeyValue(objp, key);
+
+ if (!val || !(jsp = Bp->GetBson(val))) {
+ sprintf(g->Message, "Cannot find object key %s", key);
+ return RC_FX;
+ } // endif val
+
+ } else {
+ if (*p == '[') {
+ // Old style
+ if (p[strlen(p) - 1] != ']') {
+ sprintf(g->Message, "Invalid Table path near %s", p);
+ return RC_FX;
+ } else
+ p++;
+
+ } // endif p
+
+ if (jsp->Type != TYPE_JAR) {
+ strcpy(g->Message, "Table path does not match the json file");
+ return RC_FX;
+ } // endif Type
+
+ arp = jsp;
+ objp = NULL;
+ i = atoi(p) - B;
+ val = Bp->GetArrayValue(arp, i);
+
+ if (!val) {
+ sprintf(g->Message, "Cannot find array value %d", i);
+ return RC_FX;
+ } // endif val
+
+ } // endif
+
+ jsp = val;
+ } // endfor p
+
+ } // endif objpath
+
+ if (jsp && jsp->Type == TYPE_JAR)
+ Docp = jsp;
+ else {
+ // The table is void or is just one object or one value
+ if (objp) {
+ Docp = Bp->GetKeyValue(objp, key);
+ Docp->To_Val = Bp->MOF(Bp->DupVal(Docp));
+ Docp->Type = TYPE_JAR;
+ } else if (arp) {
+ Docp = Bp->NewVal(TYPE_JAR);
+ Bp->AddArrayValue(Docp, jsp);
+ Bp->SetArrayValue(arp, Docp, i);
+ } else {
+ Top = Docp = Bp->NewVal(TYPE_JAR);
+ Bp->AddArrayValue(Docp, jsp);
+ } // endif's
+
+ } // endif jsp
+
+ Done = true;
+ return RC_OK;
+} // end of MakeDocument
+
+/***********************************************************************/
+/* JSON Cardinality: returns table size in number of rows. */
+/***********************************************************************/
+int TDBBSON::Cardinality(PGLOBAL g)
+{
+ if (!g)
+ return (Xcol || Multiple) ? 0 : 1;
+ else if (Cardinal < 0) {
+ if (!Multiple) {
+ if (MakeDocument(g) == RC_OK)
+ Cardinal = Bp->GetSize(Docp);
+
+ } else
+ return 10;
+
+ } // endif Cardinal
+
+ return Cardinal;
+} // end of Cardinality
+
+/***********************************************************************/
+/* JSON GetMaxSize: returns table size estimate in number of rows. */
+/***********************************************************************/
+int TDBBSON::GetMaxSize(PGLOBAL g)
+{
+ if (MaxSize < 0)
+ MaxSize = Cardinality(g) * ((Xcol) ? Limit : 1);
+
+ return MaxSize;
+} // end of GetMaxSize
+
+/***********************************************************************/
+/* ResetSize: called by TDBMUL when calculating size estimate.        */
+/***********************************************************************/
+void TDBBSON::ResetSize(void)
+{
+ MaxSize = Cardinal = -1;
+ Fpos = -1;
+ N = 0;
+ Done = false;
+} // end of ResetSize
+
+/***********************************************************************/
+/* TDBBSON is not indexable. */
+/***********************************************************************/
+int TDBBSON::MakeIndex(PGLOBAL g, PIXDEF pxdf, bool)
+{
+ if (pxdf) {
+ strcpy(g->Message, "JSON not indexable when pretty = 2");
+ return RC_FX;
+ } else
+ return RC_OK;
+
+} // end of MakeIndex
+
+/***********************************************************************/
+/* Return the position in the table. */
+/***********************************************************************/
+int TDBBSON::GetRecpos(void)
+{
+#if 0
+ union {
+ uint Rpos;
+ BYTE Spos[4];
+ };
+
+ Rpos = htonl(Fpos);
+ Spos[0] = (BYTE)NextSame;
+ return Rpos;
+#endif // 0
+ return Fpos;
+} // end of GetRecpos
+
+/***********************************************************************/
+/* Set the position in the table. */
+/***********************************************************************/
+bool TDBBSON::SetRecpos(PGLOBAL, int recpos)
+{
+#if 0
+ union {
+ uint Rpos;
+ BYTE Spos[4];
+ };
+
+ Rpos = recpos;
+ NextSame = Spos[0];
+ Spos[0] = 0;
+ Fpos = (signed)ntohl(Rpos);
+
+ //if (Fpos != (signed)ntohl(Rpos)) {
+ // Fpos = ntohl(Rpos);
+ // same = false;
+ //} else
+ // same = true;
+#endif // 0
+
+ Fpos = recpos - 1;
+ return false;
+} // end of SetRecpos
+
+/***********************************************************************/
+/* JSON Access Method opening routine. */
+/***********************************************************************/
+bool TDBBSON::OpenDB(PGLOBAL g)
+{
+ if (Use == USE_OPEN) {
+ /*******************************************************************/
+    /* Table already open, replace it at its beginning.               */
+ /*******************************************************************/
+ Fpos = -1;
+ NextSame = false;
+ SameRow = 0;
+ return false;
+ } // endif use
+
+/*********************************************************************/
+/* OpenDB: initialize the JSON file processing. */
+/*********************************************************************/
+ if (MakeDocument(g) != RC_OK)
+ return true;
+
+ if (Mode == MODE_INSERT)
+ switch (Jmode) {
+ case MODE_OBJECT: Row = Bp->NewVal(TYPE_JOB); break;
+ case MODE_ARRAY: Row = Bp->NewVal(TYPE_JAR); break;
+ case MODE_VALUE: Row = Bp->NewVal(TYPE_JVAL); break;
+ default:
+ sprintf(g->Message, "Invalid Jmode %d", Jmode);
+ return true;
+ } // endswitch Jmode
+
+ if (Xcol)
+    To_Filter = NULL;              // Incompatible
+
+ Use = USE_OPEN;
+ return false;
+} // end of OpenDB
+
+/***********************************************************************/
+/* ReadDB: Data Base read routine for JSON access method. */
+/***********************************************************************/
+int TDBBSON::ReadDB(PGLOBAL)
+{
+ int rc;
+
+ N++;
+
+ if (NextSame) {
+ SameRow = NextSame;
+ NextSame = false;
+ M++;
+ rc = RC_OK;
+ } else if (++Fpos < (signed)Bp->GetSize(Docp)) {
+ Row = Bp->GetArrayValue(Docp, Fpos);
+
+ if (Row->Type == TYPE_JVAL)
+ Row = Bp->GetBson(Row);
+
+ SameRow = 0;
+ M = 1;
+ rc = RC_OK;
+ } else
+ rc = RC_EF;
+
+ return rc;
+} // end of ReadDB
+
+/***********************************************************************/
+/* WriteDB: Data Base write routine for JSON access method. */
+/***********************************************************************/
+int TDBBSON::WriteDB(PGLOBAL g)
+{
+ if (Mode == MODE_INSERT) {
+ Bp->AddArrayValue(Docp, Row);
+
+ switch(Jmode) {
+ case MODE_OBJECT: Row = Bp->NewVal(TYPE_JOB); break;
+ case MODE_ARRAY: Row = Bp->NewVal(TYPE_JAR); break;
+ default: Row = Bp->NewVal(); break;
+ } // endswitch Jmode
+
+ } else
+ Bp->SetArrayValue(Docp, Row, Fpos);
+
+ Changed = true;
+ return RC_OK;
+} // end of WriteDB
+
+/***********************************************************************/
+/* Data Base delete line routine for JSON access method. */
+/***********************************************************************/
+int TDBBSON::DeleteDB(PGLOBAL g, int irc)
+{
+ if (irc == RC_OK)
+ // Deleted current row
+ Bp->DeleteValue(Docp, Fpos);
+ else if (irc == RC_FX)
+ // Delete all
+ Docp->To_Val = 0;
+
+ Changed = true;
+ return RC_OK;
+} // end of DeleteDB
+
+/***********************************************************************/
+/* Data Base close routine for JSON access methods. */
+/***********************************************************************/
+void TDBBSON::CloseDB(PGLOBAL g)
+{
+ if (!Changed)
+ return;
+
+ // Save the modified document
+ char filename[_MAX_PATH];
+
+//Docp->InitArray(g);
+
+  // Use the file name relative to the recorded datapath
+ PlugSetPath(filename, ((PBDEF)To_Def)->Fn, GetPath());
+
+ // Serialize the modified table
+ if (!Bp->Serialize(g, Top, filename, Pretty))
+ puts(g->Message);
+
+} // end of CloseDB
+
+/* ---------------------------TDBBCL class --------------------------- */
+
+/***********************************************************************/
+/* TDBBCL class constructor. */
+/***********************************************************************/
+TDBBCL::TDBBCL(PBDEF tdp) : TDBCAT(tdp) {
+ Topt = tdp->GetTopt();
+ Db = tdp->Schema;
+ Dsn = tdp->Uri;
+} // end of TDBBCL constructor
+
+/***********************************************************************/
+/* GetResult: Get the list of the JSON file columns.                  */
+/***********************************************************************/
+PQRYRES TDBBCL::GetResult(PGLOBAL g) {
+ return BSONColumns(g, Db, Dsn, Topt, false);
+} // end of GetResult
+
+/* ------------------------- End of tabbson -------------------------- */
diff --git a/storage/connect/tabbson.h b/storage/connect/tabbson.h
new file mode 100644
index 00000000000..adb02dd28e4
--- /dev/null
+++ b/storage/connect/tabbson.h
@@ -0,0 +1,339 @@
+/*************** tabbson H Declares Source Code File (.H) **************/
+/* Name: tabbson.h Version 1.0 */
+/* */
+/* (C) Copyright to the author Olivier BERTRAND 2020 */
+/* */
+/*  This file contains the BSON class declarations.                   */
+/***********************************************************************/
+#pragma once
+#include "block.h"
+#include "colblk.h"
+#include "bson.h"
+#include "tabjson.h"
+
+typedef class BTUTIL* PBTUT;
+typedef class BCUTIL* PBCUT;
+typedef class BSONDEF* PBDEF;
+typedef class TDBBSON* PBTDB;
+typedef class BSONCOL* PBSCOL;
+class TDBBSN;
+DllExport PQRYRES BSONColumns(PGLOBAL, PCSZ, PCSZ, PTOS, bool);
+
+/***********************************************************************/
+/* Class used to get the columns of a mongo collection. */
+/***********************************************************************/
+class BSONDISC : public BLOCK {
+public:
+ // Constructor
+ BSONDISC(PGLOBAL g, uint* lg);
+
+ // Functions
+ int GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt);
+ bool Find(PGLOBAL g, PBVAL jvp, PCSZ key, int j);
+ void AddColumn(PGLOBAL g);
+
+ // Members
+ JCOL jcol;
+ PJCL jcp, fjcp, pjcp;
+ //PVL vlp;
+ PBDEF tdp;
+ TDBBSN *tjnp;
+ PBTDB tjsp;
+ PBPR jpp;
+ PBVAL jsp;
+ PBPR row;
+ PBTUT bp;
+ PCSZ sep;
+ char colname[65], fmt[129], buf[16];
+ uint *length;
+ int i, n, bf, ncol, lvl, sz, limit;
+ bool all, strfy;
+}; // end of BSONDISC
+
+/***********************************************************************/
+/* JSON table. */
+/***********************************************************************/
+class DllExport BSONDEF : public DOSDEF { /* Table description */
+ friend class TDBBSON;
+ friend class TDBBSN;
+ friend class TDBBCL;
+ friend class BSONDISC;
+ friend class BSONCOL;
+#if defined(CMGO_SUPPORT)
+ friend class CMGFAM;
+#endif // CMGO_SUPPORT
+#if defined(JAVA_SUPPORT)
+ friend class JMGFAM;
+#endif // JAVA_SUPPORT
+public:
+ // Constructor
+ BSONDEF(void);
+
+ // Implementation
+ virtual const char* GetType(void) { return "BSON"; }
+
+ // Methods
+ virtual bool DefineAM(PGLOBAL g, LPCSTR am, int poff);
+ virtual PTDB GetTable(PGLOBAL g, MODE m);
+
+protected:
+ // Members
+ PGLOBAL G; /* Bson utility memory */
+ JMODE Jmode; /* MODE_OBJECT by default */
+ PCSZ Objname; /* Name of first level object */
+ PCSZ Xcol; /* Name of expandable column */
+ int Limit; /* Limit of multiple values */
+ int Pretty; /* Depends on file structure */
+ int Base; /* The array index base */
+ bool Strict; /* Strict syntax checking */
+ char Sep; /* The Jpath separator */
+ const char* Uri; /* MongoDB connection URI */
+ PCSZ Collname; /* External collection name */
+ PSZ Options; /* Colist ; Pipe */
+ PSZ Filter; /* Filter */
+ PSZ Driver; /* MongoDB Driver (C or JAVA) */
+ bool Pipe; /* True if Colist is a pipeline */
+ int Version; /* Driver version */
+ PSZ Wrapname; /* MongoDB java wrapper name */
+}; // end of BSONDEF
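+// Illustrative use, assuming the usual CONNECT SQL options (TABLE_TYPE,
+// FILE_NAME, OPTION_LIST) map onto the members above:
+//   CREATE TABLE biblio ENGINE=CONNECT TABLE_TYPE=BSON
+//     FILE_NAME='biblio.json' OPTION_LIST='Pretty=0,Depth=2,Limit=5';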
+
+
+/* -------------------------- BTUTIL class --------------------------- */
+
+/***********************************************************************/
+/* Handles all BJSON actions for a BSON table. */
+/***********************************************************************/
+class BTUTIL : public BDOC {
+public:
+ // Constructor
+ BTUTIL(PGLOBAL G, TDBBSN* tp) : BDOC(G) { Tp = tp; }
+
+ // Utility functions
+ PBVAL FindRow(PGLOBAL g);
+ PBVAL ParseLine(PGLOBAL g, int prty, bool cma);
+ PBVAL MakeTopTree(PGLOBAL g, int type);
+ PSZ SerialVal(PGLOBAL g, PBVAL top, int pretty);
+
+protected:
+ // Members
+ TDBBSN* Tp;
+}; // end of class BTUTIL
+
+/* -------------------------- BCUTIL class --------------------------- */
+
+/***********************************************************************/
+/*  Handles all BJSON actions for BSON columns.                       */
+/***********************************************************************/
+class BCUTIL : public BTUTIL {
+public:
+ // Constructor
+ BCUTIL(PGLOBAL G, PBSCOL cp, TDBBSN* tp) : BTUTIL(G, tp)
+ { Cp = cp; Jb = false; }
+
+ // Utility functions
+ void SetJsonValue(PGLOBAL g, PVAL vp, PBVAL jvp);
+ PBVAL MakeBson(PGLOBAL g, PBVAL jsp, int n);
+ PBVAL GetRowValue(PGLOBAL g, PBVAL row, int i);
+ PVAL GetColumnValue(PGLOBAL g, PBVAL row, int i);
+ PVAL ExpandArray(PGLOBAL g, PBVAL arp, int n);
+ PVAL CalculateArray(PGLOBAL g, PBVAL arp, int n);
+ PBVAL GetRow(PGLOBAL g);
+
+protected:
+ // Member
+ PBSCOL Cp;
+ bool Jb;
+}; // end of class BCUTIL
+
+/* -------------------------- TDBBSN class --------------------------- */
+
+/***********************************************************************/
+/* This is the BSN Access Method class declaration. */
+/* The table is a DOS file, each record being a JSON object. */
+/***********************************************************************/
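+// e.g. each record of such a file is one JSON object on its own line:
+//   {"name":"Joe","children":[8,13]}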
+class DllExport TDBBSN : public TDBDOS {
+ friend class BSONCOL;
+ friend class BSONDEF;
+ friend class BTUTIL;
+ friend class BCUTIL;
+ friend class BSONDISC;
+#if defined(CMGO_SUPPORT)
+ friend class CMGFAM;
+#endif // CMGO_SUPPORT
+#if defined(JAVA_SUPPORT)
+ friend class JMGFAM;
+#endif // JAVA_SUPPORT
+public:
+ // Constructor
+ TDBBSN(PGLOBAL g, PBDEF tdp, PTXF txfp);
+ TDBBSN(TDBBSN* tdbp);
+
+ // Implementation
+ virtual AMT GetAmType(void) { return TYPE_AM_JSN; }
+ virtual bool SkipHeader(PGLOBAL g);
+ virtual PTDB Duplicate(PGLOBAL g) { return (PTDB)new(g) TDBBSN(this); }
+ PBVAL GetRow(void) { return Row; }
+
+ // Methods
+ virtual PTDB Clone(PTABS t);
+ virtual PCOL MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n);
+ virtual PCOL InsertSpecialColumn(PCOL colp);
+ virtual int RowNumber(PGLOBAL g, bool b = FALSE) {return (b) ? M : N;}
+ virtual bool CanBeFiltered(void)
+ {return Txfp->GetAmType() == TYPE_AM_MGO || !Xcol;}
+
+ // Database routines
+ virtual int Cardinality(PGLOBAL g);
+ virtual int GetMaxSize(PGLOBAL g);
+ virtual bool OpenDB(PGLOBAL g);
+ virtual int ReadDB(PGLOBAL g);
+ virtual bool PrepareWriting(PGLOBAL g);
+ virtual int WriteDB(PGLOBAL g);
+ virtual void CloseDB(PGLOBAL g);
+
+ // Specific routine
+ virtual int EstimatedLength(void);
+
+protected:
+ PBVAL FindRow(PGLOBAL g);
+//int MakeTopTree(PGLOBAL g, PBVAL jsp);
+
+ // Members
+  PBTUT   Bp;            // The BTUTIL handling class
+ PBVAL Top; // The top JSON tree
+ PBVAL Row; // The current row
+ PBSCOL Colp; // The multiple column
+ JMODE Jmode; // MODE_OBJECT by default
+ PCSZ Objname; // The table object name
+ PCSZ Xcol; // Name of expandable column
+ int Fpos; // The current row index
+ int N; // The current Rownum
+ int M; // Index of multiple value
+ int Limit; // Limit of multiple values
+ int Pretty; // Depends on file structure
+ int NextSame; // Same next row
+ int SameRow; // Same row nb
+ int Xval; // Index of expandable array
+ int B; // Array index base
+ char Sep; // The Jpath separator
+ bool Strict; // Strict syntax checking
+ bool Comma; // Row has final comma
+}; // end of class TDBBSN
+
+/* -------------------------- BSONCOL class -------------------------- */
+
+/***********************************************************************/
+/* Class BSONCOL: JSON access method column descriptor. */
+/***********************************************************************/
+class DllExport BSONCOL : public DOSCOL {
+ friend class TDBBSN;
+ friend class TDBBSON;
+ friend class BCUTIL;
+#if defined(CMGO_SUPPORT)
+ friend class CMGFAM;
+#endif // CMGO_SUPPORT
+#if defined(JAVA_SUPPORT)
+ friend class JMGFAM;
+#endif // JAVA_SUPPORT
+public:
+ // Constructors
+ BSONCOL(PGLOBAL g, PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i);
+ BSONCOL(BSONCOL* colp, PTDB tdbp); // Constructor used in copy process
+
+ // Implementation
+ virtual int GetAmType(void) { return Tbp->GetAmType(); }
+
+ // Methods
+ virtual bool SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check);
+ bool ParseJpath(PGLOBAL g);
+ virtual PSZ GetJpath(PGLOBAL g, bool proj);
+ virtual void ReadColumn(PGLOBAL g);
+ virtual void WriteColumn(PGLOBAL g);
+
+protected:
+ bool CheckExpand(PGLOBAL g, int i, PSZ nm, bool b);
+ bool SetArrayOptions(PGLOBAL g, char* p, int i, PSZ nm);
+
+ // Default constructor not to be used
+ BSONCOL(void) {}
+
+ // Members
+ TDBBSN *Tbp; // To the JSN table block
+ PBCUT Cp; // To the BCUTIL handling class
+ PVAL MulVal; // To value used by multiple column
+ char *Jpath; // The json path
+ JNODE *Nodes; // The intermediate objects
+ int Nod; // The number of intermediate objects
+ int Xnod; // Index of multiple values
+ char Sep; // The Jpath separator
+ bool Xpd; // True for expandable column
+ bool Parsed; // True when parsed
+ bool Warned; // True when warning issued
+}; // end of class BSONCOL
+
+/* -------------------------- TDBBSON class -------------------------- */
+
+/***********************************************************************/
+/*  This is the BSON Access Method class declaration.                 */
+/***********************************************************************/
+class DllExport TDBBSON : public TDBBSN {
+ friend class BSONDEF;
+ friend class BSONCOL;
+public:
+ // Constructor
+ TDBBSON(PGLOBAL g, PBDEF tdp, PTXF txfp);
+ TDBBSON(PBTDB tdbp);
+
+ // Implementation
+ virtual AMT GetAmType(void) { return TYPE_AM_JSON; }
+ virtual PTDB Duplicate(PGLOBAL g) { return (PTDB)new(g) TDBBSON(this); }
+ PBVAL GetDoc(void) { return Docp; }
+
+ // Methods
+ virtual PTDB Clone(PTABS t);
+
+ // Database routines
+ virtual int Cardinality(PGLOBAL g);
+ virtual int GetMaxSize(PGLOBAL g);
+ virtual void ResetSize(void);
+ virtual int GetProgCur(void) { return N; }
+ virtual int GetRecpos(void);
+ virtual bool SetRecpos(PGLOBAL g, int recpos);
+ virtual bool OpenDB(PGLOBAL g);
+ virtual int ReadDB(PGLOBAL g);
+ virtual bool PrepareWriting(PGLOBAL g) { return false; }
+ virtual int WriteDB(PGLOBAL g);
+ virtual int DeleteDB(PGLOBAL g, int irc);
+ virtual void CloseDB(PGLOBAL g);
+ int MakeDocument(PGLOBAL g);
+
+ // Optimization routines
+ virtual int MakeIndex(PGLOBAL g, PIXDEF pxdf, bool add);
+
+protected:
+ int MakeNewDoc(PGLOBAL g);
+
+ // Members
+ PBVAL Docp; // The document array
+ int Multiple; // 0: No 1: DIR 2: Section 3: filelist
+ bool Done; // True when document parsing is done
+ bool Changed; // After Update, Insert or Delete
+}; // end of class TDBBSON
+
+/***********************************************************************/
+/* This is the class declaration for the JSON catalog table. */
+/***********************************************************************/
+class DllExport TDBBCL : public TDBCAT {
+public:
+ // Constructor
+ TDBBCL(PBDEF tdp);
+
+protected:
+ // Specific routines
+ virtual PQRYRES GetResult(PGLOBAL g);
+
+ // Members
+ PTOS Topt;
+ PCSZ Db;
+ PCSZ Dsn;
+}; // end of class TDBBCL
diff --git a/storage/connect/tabdos.cpp b/storage/connect/tabdos.cpp
index 3002f8906ed..fa764b1f84d 100644
--- a/storage/connect/tabdos.cpp
+++ b/storage/connect/tabdos.cpp
@@ -1995,7 +1995,7 @@ int TDBDOS::Cardinality(PGLOBAL g)
if (Mode == MODE_ANY && ExactInfo()) {
// Using index impossible or failed, do it the hard way
Mode = MODE_READ;
- To_Line = (char*)PlugSubAlloc(g, NULL, Lrecl + 1);
+ To_Line = (char*)PlugSubAlloc(g, NULL, (size_t)Lrecl + 1);
if (Txfp->OpenTableFile(g))
return (Cardinal = Txfp->Cardinality(g));
@@ -2145,6 +2145,9 @@ bool TDBDOS::OpenDB(PGLOBAL g)
} // endif use
if (Mode == MODE_DELETE && !Next && Txfp->GetAmType() != TYPE_AM_DOS
+#if defined(BSON_SUPPORT)
+ && Txfp->GetAmType() != TYPE_AM_BIN
+#endif // BSON_SUPPORT
&& Txfp->GetAmType() != TYPE_AM_MGO) {
// Delete all lines. Not handled in MAP or block mode
Txfp = new(g) DOSFAM((PDOSDEF)To_Def);
@@ -2229,7 +2232,7 @@ int TDBDOS::ReadDB(PGLOBAL g)
return RC_EF;
case -2: // No match for join
return RC_NF;
- case -3: // Same record as last non null one
+ case -3: // Same record as non null last one
num_there++;
return RC_OK;
default:
diff --git a/storage/connect/tabfmt.cpp b/storage/connect/tabfmt.cpp
index eed67525f78..37d28b96517 100644
--- a/storage/connect/tabfmt.cpp
+++ b/storage/connect/tabfmt.cpp
@@ -67,7 +67,7 @@
/* This should be an option. */
/***********************************************************************/
#define MAXCOL 200 /* Default max column nb in result */
-#define TYPE_UNKNOWN 10 /* Must be greater than other types */
+#define TYPE_UNKNOWN 12 /* Must be greater than other types */
/***********************************************************************/
/* External function. */
@@ -311,14 +311,14 @@ PQRYRES CSVColumns(PGLOBAL g, PCSZ dp, PTOS topt, bool info)
} else if (*p == q) {
if (phase == 0) {
- if (blank)
- {
+ if (blank) {
if (++nerr > mxr) {
sprintf(g->Message, MSG(MISPLACED_QUOTE), num_read);
goto err;
} else
goto skip;
}
+
n = 0;
phase = digit = 1;
} else if (phase == 1) {
@@ -342,14 +342,14 @@ PQRYRES CSVColumns(PGLOBAL g, PCSZ dp, PTOS topt, bool info)
goto skip;
} else {
- if (phase == 2)
- {
+ if (phase == 2) {
if (++nerr > mxr) {
sprintf(g->Message, MSG(MISPLACED_QUOTE), num_read);
goto err;
} else
goto skip;
}
+
// isdigit cannot be used here because of debug assert
if (!strchr("0123456789", *p)) {
if (!digit && *p == dechar)
@@ -364,14 +364,14 @@ PQRYRES CSVColumns(PGLOBAL g, PCSZ dp, PTOS topt, bool info)
blank = 1;
} // endif's *p
- if (phase == 1)
- {
+ if (phase == 1) {
if (++nerr > mxr) {
sprintf(g->Message, MSG(UNBALANCE_QUOTE), num_read);
goto err;
} else
goto skip;
}
+
if (n) {
len[i] = MY_MAX(len[i], n);
type = (digit || n == 0 || (dec && n == 1)) ? TYPE_STRING
@@ -744,8 +744,7 @@ bool TDBCSV::OpenDB(PGLOBAL g)
int i, len;
PCSVCOL colp;
- if (!Fields) // May have been set in TABFMT::OpenDB
- {
+ if (!Fields) { // May have been set in TABFMT::OpenDB
if (Mode != MODE_UPDATE && Mode != MODE_INSERT) {
for (colp = (PCSVCOL)Columns; colp; colp = (PCSVCOL)colp->Next)
if (!colp->IsSpecial() && !colp->IsVirtual())
@@ -759,6 +758,7 @@ bool TDBCSV::OpenDB(PGLOBAL g)
if (!cdp->IsSpecial() && !cdp->IsVirtual())
Fields++;
}
+
Offset = (int*)PlugSubAlloc(g, NULL, sizeof(int) * Fields);
Fldlen = (int*)PlugSubAlloc(g, NULL, sizeof(int) * Fields);
@@ -778,8 +778,7 @@ bool TDBCSV::OpenDB(PGLOBAL g)
} // endfor i
- if (Field)
- {
+ if (Field) {
// Prepare writing fields
if (Mode != MODE_UPDATE) {
for (colp = (PCSVCOL)Columns; colp; colp = (PCSVCOL)colp->Next)
@@ -803,6 +802,7 @@ bool TDBCSV::OpenDB(PGLOBAL g)
Fldtyp[i] = IsTypeNum(cdp->GetType());
} // endif cdp
}
+
} // endif Use
if (Header) {
@@ -1051,8 +1051,7 @@ bool TDBCSV::PrepareWriting(PGLOBAL g)
if (i)
strcat(To_Line, sep);
- if (Field[i])
- {
+ if (Field[i]) {
if (!strlen(Field[i])) {
// Generally null fields are not quoted
if (Quoted > 2)
@@ -1060,7 +1059,7 @@ bool TDBCSV::PrepareWriting(PGLOBAL g)
strcat(strcat(To_Line, qot), qot);
} else if (Qot && (strchr(Field[i], Sep) || *Field[i] == Qot
- || Quoted > 1 || (Quoted == 1 && !Fldtyp[i])))
+ || Quoted > 1 || (Quoted == 1 && !Fldtyp[i]))) {
if (strchr(Field[i], Qot)) {
// Field contains quotes that must be doubled
int j, k = strlen(To_Line), n = strlen(Field[i]);
@@ -1078,10 +1077,12 @@ bool TDBCSV::PrepareWriting(PGLOBAL g)
To_Line[k] = '\0';
} else
strcat(strcat(strcat(To_Line, qot), Field[i]), qot);
+ }
else
strcat(To_Line, Field[i]);
}
+
} // endfor i
#if defined(_DEBUG)
diff --git a/storage/connect/tabjson.cpp b/storage/connect/tabjson.cpp
index dbcd590c3de..402a0a1de37 100644
--- a/storage/connect/tabjson.cpp
+++ b/storage/connect/tabjson.cpp
@@ -1,8 +1,9 @@
/************* tabjson C++ Program Source Code File (.CPP) *************/
-/* PROGRAM NAME: tabjson Version 1.7 */
-/* (C) Copyright to the author Olivier BERTRAND 2014 - 2019 */
+/* PROGRAM NAME: tabjson Version 1.8 */
+/* (C) Copyright to the author Olivier BERTRAND 2014 - 2020 */
/* This program are the JSON class DB execution routines. */
/***********************************************************************/
+#undef BSON_SUPPORT
/***********************************************************************/
/* Include relevant sections of the MariaDB header file. */
@@ -46,7 +47,7 @@
/* This should be an option. */
/***********************************************************************/
#define MAXCOL 200 /* Default max column nb in result */
-#define TYPE_UNKNOWN 12 /* Must be greater than other types */
+//#define TYPE_UNKNOWN 12 /* Must be greater than other types */
/***********************************************************************/
/* External functions. */
@@ -114,7 +115,7 @@ PQRYRES JSONColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt, bool info)
/*********************************************************************/
for (i = 0, jcp = pjdc->fjcp; jcp; i++, jcp = jcp->Next) {
if (jcp->Type == TYPE_UNKNOWN)
- jcp->Type = TYPE_STRING; // Void column
+ jcp->Type = TYPE_STRG; // Void column
crp = qrp->Colresp; // Column Name
crp->Kdata->SetValue(jcp->Name, i);
@@ -152,26 +153,29 @@ JSONDISC::JSONDISC(PGLOBAL g, uint *lg)
{
length = lg;
jcp = fjcp = pjcp = NULL;
+ tdp = NULL;
tjnp = NULL;
jpp = NULL;
tjsp = NULL;
jsp = NULL;
row = NULL;
sep = NULL;
- i = n = bf = ncol = lvl = sz = 0;
+ i = n = bf = ncol = lvl = sz = limit = 0;
all = strfy = false;
} // end of JSONDISC constructor
int JSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt)
{
- char filename[_MAX_PATH];
- bool mgo = (GetTypeID(topt->type) == TAB_MONGO);
+ char filename[_MAX_PATH];
+ bool mgo = (GetTypeID(topt->type) == TAB_MONGO);
+ PGLOBAL G = NULL;
lvl = GetIntegerTableOption(g, topt, "Level", GetDefaultDepth());
lvl = GetIntegerTableOption(g, topt, "Depth", lvl);
sep = GetStringTableOption(g, topt, "Separator", ".");
sz = GetIntegerTableOption(g, topt, "Jsize", 1024);
- strfy = GetBooleanTableOption(g, topt, "Stringify", false);
+ limit = GetIntegerTableOption(g, topt, "Limit", 10);
+ strfy = GetBooleanTableOption(g, topt, "Stringify", false);
/*********************************************************************/
/* Open the input file. */
@@ -240,7 +244,7 @@ int JSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt)
if (tjsp->MakeDocument(g))
return 0;
- jsp = (tjsp->GetDoc()) ? tjsp->GetDoc()->GetValue(0) : NULL;
+ jsp = (tjsp->GetDoc()) ? tjsp->GetDoc()->GetArrayValue(0) : NULL;
} else {
if (!(tdp->Lrecl = GetIntegerTableOption(g, topt, "Lrecl", 0)))
{
@@ -286,18 +290,15 @@ int JSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt)
#endif
} // endif Driver
- } else
+ } else if (tdp->Pretty >= 0)
tjnp = new(g) TDBJSN(tdp, new(g) DOSFAM(tdp));
+ else
+ tjnp = new(g) TDBJSN(tdp, new(g) BINFAM(tdp));
tjnp->SetMode(MODE_READ);
// Allocate the parse work memory
- PGLOBAL G = (PGLOBAL)PlugSubAlloc(g, NULL, sizeof(GLOBAL));
- memset(G, 0, sizeof(GLOBAL));
- G->Sarea_Size = (size_t)tdp->Lrecl * 10;
- G->Sarea = PlugSubAlloc(g, NULL, G->Sarea_Size);
- PlugSubSet(G->Sarea, G->Sarea_Size);
- G->jump_level = 0;
+ G = PlugInit(NULL, (size_t)tdp->Lrecl * (tdp->Pretty >= 0 ? 10 : 2));
tjnp->SetG(G);
if (tjnp->OpenDB(g))
@@ -309,7 +310,8 @@ int JSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt)
case RC_FX:
goto err;
default:
- jsp = tjnp->GetRow();
+// jsp = tjnp->FindRow(g); // FindRow was done in ReadDB
+ jsp = tjnp->Row;
} // endswitch ReadDB
} // endif pretty
@@ -335,11 +337,11 @@ int JSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt)
/* Analyse the JSON tree and define columns. */
/*********************************************************************/
for (i = 1; ; i++) {
- for (jpp = row->GetFirst(); jpp; jpp = jpp->GetNext()) {
- strncpy(colname, jpp->GetKey(), 64);
+ for (jpp = row->GetFirst(); jpp; jpp = jpp->Next) {
+ strncpy(colname, jpp->Key, 64);
fmt[bf] = 0;
- if (Find(g, jpp->GetVal(), colname, MY_MIN(lvl, 0)))
+ if (Find(g, jpp->Val, colname, MY_MIN(lvl, 0)))
goto err;
} // endfor jpp
@@ -359,11 +361,12 @@ int JSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt)
case RC_FX:
goto err;
default:
- jsp = tjnp->GetRow();
+// jsp = tjnp->FindRow(g);
+ jsp = tjnp->Row;
} // endswitch ReadDB
} else
- jsp = tjsp->GetDoc()->GetValue(i);
+ jsp = tjsp->GetDoc()->GetArrayValue(i);
if (!(row = (jsp) ? jsp->GetObject() : NULL))
break;
@@ -390,14 +393,35 @@ bool JSONDISC::Find(PGLOBAL g, PJVAL jvp, PCSZ key, int j)
PJOB job;
PJAR jar;
- if ((valp = jvp ? jvp->GetValue() : NULL)) {
- if (JsonAllPath() && !fmt[bf])
+ if (jvp && jvp->DataType != TYPE_JSON) {
+ if (JsonAllPath() && !fmt[bf])
strcat(fmt, colname);
- jcol.Type = valp->GetType();
- jcol.Len = valp->GetValLen();
- jcol.Scale = valp->GetValPrec();
- jcol.Cbn = valp->IsNull();
+ jcol.Type = jvp->DataType;
+
+ switch (jvp->DataType) {
+ case TYPE_STRG:
+ case TYPE_DTM:
+ jcol.Len = (int)strlen(jvp->Strp);
+ break;
+ case TYPE_INTG:
+ case TYPE_BINT:
+ jcol.Len = (int)strlen(jvp->GetString(g));
+ break;
+ case TYPE_DBL:
+ jcol.Len = (int)strlen(jvp->GetString(g));
+ jcol.Scale = jvp->Nd;
+ break;
+ case TYPE_BOOL:
+ jcol.Len = 1;
+ break;
+ default:
+ jcol.Len = 0;
+ break;
+ } // endswitch Type
+
+ jcol.Scale = jvp->Nd;
+ jcol.Cbn = jvp->DataType == TYPE_NULL;
} else if (!jvp || jvp->IsNull()) {
jcol.Type = TYPE_UNKNOWN;
jcol.Len = jcol.Scale = 0;
@@ -413,8 +437,8 @@ bool JSONDISC::Find(PGLOBAL g, PJVAL jvp, PCSZ key, int j)
case TYPE_JOB:
job = (PJOB)jsp;
- for (PJPR jrp = job->GetFirst(); jrp; jrp = jrp->GetNext()) {
- PCSZ k = jrp->GetKey();
+ for (PJPR jrp = job->GetFirst(); jrp; jrp = jrp->Next) {
+ PCSZ k = jrp->Key;
if (*k != '$') {
n = sizeof(fmt) - strlen(fmt) -1;
@@ -423,7 +447,7 @@ bool JSONDISC::Find(PGLOBAL g, PJVAL jvp, PCSZ key, int j)
strncat(strncat(colname, "_", n), k, n - 1);
} // endif Key
- if (Find(g, jrp->GetVal(), k, j + 1))
+ if (Find(g, jrp->Val, k, j + 1))
return true;
*p = *pc = 0;
@@ -434,7 +458,7 @@ bool JSONDISC::Find(PGLOBAL g, PJVAL jvp, PCSZ key, int j)
jar = (PJAR)jsp;
if (all || (tdp->Xcol && !stricmp(tdp->Xcol, key)))
- ars = jar->GetSize(false);
+ ars = MY_MIN(jar->GetSize(false), limit);
else
ars = MY_MIN(jar->GetSize(false), 1);
@@ -460,7 +484,7 @@ bool JSONDISC::Find(PGLOBAL g, PJVAL jvp, PCSZ key, int j)
strncat(fmt, (tdp->Uri ? sep : "[*]"), n);
}
- if (Find(g, jar->GetValue(k), "", j))
+ if (Find(g, jar->GetArrayValue(k), "", j))
return true;
*p = *pc = 0;
@@ -481,7 +505,7 @@ bool JSONDISC::Find(PGLOBAL g, PJVAL jvp, PCSZ key, int j)
} else if (JsonAllPath() && !fmt[bf])
strcat(fmt, colname);
- jcol.Type = TYPE_STRING;
+ jcol.Type = TYPE_STRG;
jcol.Len = sz;
jcol.Scale = 0;
jcol.Cbn = true;
@@ -503,10 +527,29 @@ void JSONDISC::AddColumn(PGLOBAL g)
if (jcp) {
if (jcp->Type != jcol.Type) {
- if (jcp->Type == TYPE_UNKNOWN)
+ if (jcp->Type == TYPE_UNKNOWN || jcp->Type == TYPE_NULL)
jcp->Type = jcol.Type;
- else if (jcol.Type != TYPE_UNKNOWN)
- jcp->Type = TYPE_STRING;
+// else if (jcol.Type != TYPE_UNKNOWN && jcol.Type != TYPE_VOID)
+// jcp->Type = TYPE_STRING;
+ else if (jcp->Type != TYPE_STRG)
+ switch (jcol.Type) {
+ case TYPE_STRG:
+ case TYPE_DBL:
+ jcp->Type = jcol.Type;
+ break;
+ case TYPE_BINT:
+ if (jcp->Type == TYPE_INTG || jcp->Type == TYPE_BOOL)
+ jcp->Type = jcol.Type;
+
+ break;
+ case TYPE_INTG:
+ if (jcp->Type == TYPE_BOOL)
+ jcp->Type = jcol.Type;
+
+ break;
+ default:
+ break;
+        } // endswitch Type
} // endif Type
@@ -625,9 +668,9 @@ PTDB JSONDEF::GetTable(PGLOBAL g, MODE m)
PTXF txfp = NULL;
// JSN not used for pretty=1 for insert or delete
- if (!Pretty || (Pretty == 1 && (m == MODE_READ || m == MODE_UPDATE))) {
+ if (Pretty <= 0 || (Pretty == 1 && (m == MODE_READ || m == MODE_UPDATE))) {
USETEMP tmp = UseTemp();
- bool map = Mapped && m != MODE_INSERT &&
+ bool map = Mapped && Pretty >= 0 && m != MODE_INSERT &&
!(tmp != TMP_NO && m == MODE_UPDATE) &&
!(tmp == TMP_FORCE &&
(m == MODE_UPDATE || m == MODE_DELETE));
@@ -684,21 +727,26 @@ PTDB JSONDEF::GetTable(PGLOBAL g, MODE m)
#endif // !GZ_SUPPORT
} else if (map)
txfp = new(g) MAPFAM(this);
- else
+ else if (Pretty < 0) // BJsonfile
+ txfp = new(g) BINFAM(this);
+ else
txfp = new(g) DOSFAM(this);
- // Txfp must be set for TDBDOS
+ // Txfp must be set for TDBJSN
tdbp = new(g) TDBJSN(this, txfp);
if (Lrecl) {
// Allocate the parse work memory
+#if 0
PGLOBAL G = (PGLOBAL)PlugSubAlloc(g, NULL, sizeof(GLOBAL));
memset(G, 0, sizeof(GLOBAL));
- G->Sarea_Size = Lrecl * 10;
+ G->Sarea_Size = (size_t)Lrecl * 10;
G->Sarea = PlugSubAlloc(g, NULL, G->Sarea_Size);
PlugSubSet(G->Sarea, G->Sarea_Size);
G->jump_level = 0;
((TDBJSN*)tdbp)->G = G;
+#endif // 0
+ ((TDBJSN*)tdbp)->G = PlugInit(NULL, (size_t)Lrecl * (Pretty >= 0 ? 10 : 2));
} else {
strcpy(g->Message, "LRECL is not defined");
return NULL;
@@ -736,10 +784,10 @@ PTDB JSONDEF::GetTable(PGLOBAL g, MODE m)
/* --------------------------- Class TDBJSN -------------------------- */
/***********************************************************************/
-/* Implementation of the TDBJSN class. */
+/* Implementation of the TDBJSN class (Pretty < 2) */
/***********************************************************************/
TDBJSN::TDBJSN(PJDEF tdp, PTXF txfp) : TDBDOS(tdp, txfp)
- {
+{
G = NULL;
Top = NULL;
Row = NULL;
@@ -772,35 +820,35 @@ TDBJSN::TDBJSN(PJDEF tdp, PTXF txfp) : TDBDOS(tdp, txfp)
SameRow = 0;
Xval = -1;
Comma = false;
- } // end of TDBJSN standard constructor
+} // end of TDBJSN standard constructor
-TDBJSN::TDBJSN(TDBJSN *tdbp) : TDBDOS(NULL, tdbp)
- {
- G = NULL;
- Top = tdbp->Top;
- Row = tdbp->Row;
- Val = tdbp->Val;
- Colp = tdbp->Colp;
- Jmode = tdbp->Jmode;
- Objname = tdbp->Objname;
- Xcol = tdbp->Xcol;
- Fpos = tdbp->Fpos;
- N = tdbp->N;
- M = tdbp->M;
- Limit = tdbp->Limit;
- NextSame = tdbp->NextSame;
- SameRow = tdbp->SameRow;
- Xval = tdbp->Xval;
- B = tdbp->B;
- Sep = tdbp->Sep;
- Pretty = tdbp->Pretty;
- Strict = tdbp->Strict;
- Comma = tdbp->Comma;
- } // end of TDBJSN copy constructor
+TDBJSN::TDBJSN(TDBJSN* tdbp) : TDBDOS(NULL, tdbp)
+{
+ G = NULL;
+ Top = tdbp->Top;
+ Row = tdbp->Row;
+ Val = tdbp->Val;
+ Colp = tdbp->Colp;
+ Jmode = tdbp->Jmode;
+ Objname = tdbp->Objname;
+ Xcol = tdbp->Xcol;
+ Fpos = tdbp->Fpos;
+ N = tdbp->N;
+ M = tdbp->M;
+ Limit = tdbp->Limit;
+ NextSame = tdbp->NextSame;
+ SameRow = tdbp->SameRow;
+ Xval = tdbp->Xval;
+ B = tdbp->B;
+ Sep = tdbp->Sep;
+ Pretty = tdbp->Pretty;
+ Strict = tdbp->Strict;
+ Comma = tdbp->Comma;
+} // end of TDBJSN copy constructor
// Used for update
PTDB TDBJSN::Clone(PTABS t)
- {
+{
G = NULL;
PTDB tp;
PJCOL cp1, cp2;
@@ -814,23 +862,23 @@ PTDB TDBJSN::Clone(PTABS t)
} // endfor cp1
return tp;
- } // end of Clone
+} // end of Clone
/***********************************************************************/
/* Allocate JSN column description block. */
/***********************************************************************/
PCOL TDBJSN::MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n)
- {
+{
PJCOL colp = new(g) JSONCOL(g, cdp, this, cprec, n);
return (colp->ParseJpath(g)) ? NULL : colp;
- } // end of MakeCol
+} // end of MakeCol
/***********************************************************************/
/* InsertSpecialColumn: Put a special column ahead of the column list.*/
/***********************************************************************/
PCOL TDBJSN::InsertSpecialColumn(PCOL colp)
- {
+{
if (!colp->IsSpecial())
return NULL;
@@ -840,31 +888,47 @@ PCOL TDBJSN::InsertSpecialColumn(PCOL colp)
colp->SetNext(Columns);
Columns = colp;
return colp;
- } // end of InsertSpecialColumn
+} // end of InsertSpecialColumn
+#if 0
/***********************************************************************/
/* JSON Cardinality: returns table size in number of rows. */
/***********************************************************************/
int TDBJSN::Cardinality(PGLOBAL g)
- {
+{
if (!g)
return 0;
- else if (Cardinal < 0)
- Cardinal = TDBDOS::Cardinality(g);
+ else if (Cardinal < 0) {
+ Cardinal = TDBDOS::Cardinality(g);
+
+ } // endif Cardinal
return Cardinal;
- } // end of Cardinality
+} // end of Cardinality
/***********************************************************************/
/* JSON GetMaxSize: returns file size estimate in number of lines. */
/***********************************************************************/
int TDBJSN::GetMaxSize(PGLOBAL g)
- {
- if (MaxSize < 0)
- MaxSize = TDBDOS::GetMaxSize(g) * ((Xcol) ? Limit : 1);
+{
+ if (MaxSize < 0)
+ MaxSize = TDBDOS::GetMaxSize(g) * ((Xcol) ? Limit : 1);
return MaxSize;
- } // end of GetMaxSize
+} // end of GetMaxSize
+#endif // 0
+
+/***********************************************************************/
+/* JSON EstimatedLength. Returns an estimated minimum line length. */
+/***********************************************************************/
+int TDBJSN::EstimatedLength(void)
+{
+ if (AvgLen <= 0)
+ return (Lrecl ? Lrecl : 1024) / 8; // TODO: make it better
+ else
+ return AvgLen;
+
+} // end of Estimated Length
/***********************************************************************/
/* Find the row in the tree structure. */
@@ -881,7 +945,7 @@ PJSON TDBJSN::FindRow(PGLOBAL g)
if (*objpath != '[' && !IsNum(objpath)) { // objpass is a key
val = (jsp->GetType() == TYPE_JOB) ?
- jsp->GetObject()->GetValue(objpath) : NULL;
+ jsp->GetObject()->GetKeyValue(objpath) : NULL;
} else {
if (*objpath == '[') {
if (objpath[strlen(objpath) - 1] == ']')
@@ -891,7 +955,7 @@ PJSON TDBJSN::FindRow(PGLOBAL g)
} // endif [
val = (jsp->GetType() == TYPE_JAR) ?
- jsp->GetArray()->GetValue(atoi(objpath) - B) : NULL;
+ jsp->GetArray()->GetArrayValue(atoi(objpath) - B) : NULL;
} // endif objpath
jsp = (val) ? val->GetJson() : NULL;
@@ -904,7 +968,7 @@ PJSON TDBJSN::FindRow(PGLOBAL g)
/* OpenDB: Data Base open routine for JSN access method. */
/***********************************************************************/
bool TDBJSN::OpenDB(PGLOBAL g)
- {
+{
if (Use == USE_OPEN) {
/*******************************************************************/
/* Table already open replace it at its beginning. */
@@ -928,7 +992,51 @@ bool TDBJSN::OpenDB(PGLOBAL g)
} // endif Use
- if (TDBDOS::OpenDB(g))
+ if (Pretty < 0) {
+ /*******************************************************************/
+ /* Binary BJSON table. */
+ /*******************************************************************/
+ xtrc(1, "JSN OpenDB: tdbp=%p tdb=R%d use=%d mode=%d\n",
+ this, Tdb_No, Use, Mode);
+
+ if (Use == USE_OPEN) {
+ /*******************************************************************/
+ /* Table already open, just replace it at its beginning. */
+ /*******************************************************************/
+ if (!To_Kindex) {
+ Txfp->Rewind(); // see comment in Work.log
+ } else // Table is to be accessed through a sorted index table
+ To_Kindex->Reset();
+
+ return false;
+ } // endif use
+
+ /*********************************************************************/
+ /* Open according to logical input/output mode required. */
+    /*  Use conventional input/output functions.                         */
+ /*********************************************************************/
+ if (Txfp->OpenTableFile(g))
+ return true;
+
+ Use = USE_OPEN; // Do it now in case we are recursively called
+
+ /*********************************************************************/
+ /* Lrecl is Ok. */
+ /*********************************************************************/
+ size_t linelen = Lrecl;
+ MODE mode = Mode;
+
+ // Buffer must be allocated in g->Sarea
+ Mode = MODE_ANY;
+ Txfp->AllocateBuffer(g);
+ Mode = mode;
+
+ //To_Line = (char*)PlugSubAlloc(g, NULL, linelen);
+ //memset(To_Line, 0, linelen);
+ To_Line = Txfp->GetBuf();
+ xtrc(1, "OpenJSN: R%hd mode=%d To_Line=%p\n", Tdb_No, Mode, To_Line);
+ return false;
+ } else if (TDBDOS::OpenDB(g))
return true;
if (Xcol)
@@ -943,7 +1051,7 @@ bool TDBJSN::OpenDB(PGLOBAL g)
/* Kindex construction if the file is accessed using an index. */
/***********************************************************************/
bool TDBJSN::SkipHeader(PGLOBAL g)
- {
+{
int len = GetFileLength(g);
bool rc = false;
@@ -952,62 +1060,71 @@ bool TDBJSN::SkipHeader(PGLOBAL g)
return true;
#endif // _DEBUG
-#if defined(__WIN__)
-#define Ending 2
-#else // !__WIN__
-#define Ending 1
-#endif // !__WIN__
-
if (Pretty == 1) {
if (Mode == MODE_INSERT || Mode == MODE_DELETE) {
// Mode Insert and delete are no more handled here
- assert(false);
- } else if (len) // !Insert && !Delete
+ DBUG_ASSERT(false);
+ } else if (len > 0) // !Insert && !Delete
rc = (Txfp->SkipRecord(g, false) == RC_FX || Txfp->RecordPos(g));
- } // endif Pretty
+ } // endif Pretty
return rc;
- } // end of SkipHeader
+} // end of SkipHeader
/***********************************************************************/
/* ReadDB: Data Base read routine for JSN access method. */
/***********************************************************************/
-int TDBJSN::ReadDB(PGLOBAL g)
- {
- int rc;
+int TDBJSN::ReadDB(PGLOBAL g) {
+ int rc;
+
+ N++;
+
+ if (NextSame) {
+ SameRow = NextSame;
+ NextSame = 0;
+ M++;
+ return RC_OK;
+ } else if ((rc = TDBDOS::ReadDB(g)) == RC_OK) {
+ if (!IsRead() && ((rc = ReadBuffer(g)) != RC_OK))
+ return rc; // Deferred reading failed
+
+ if (Pretty >= 0) {
+ // Recover the memory used for parsing
+ PlugSubSet(G->Sarea, G->Sarea_Size);
+
+ if ((Row = ParseJson(G, To_Line, strlen(To_Line), &Pretty, &Comma))) {
+ Row = FindRow(g);
+ SameRow = 0;
+ Fpos++;
+ M = 1;
+ rc = RC_OK;
+ } else if (Pretty != 1 || strcmp(To_Line, "]")) {
+ strcpy(g->Message, G->Message);
+ rc = RC_FX;
+ } else
+ rc = RC_EF;
- N++;
+ } else {
+ // Here we get a movable Json binary tree
+ PJSON jsp;
+ SWAP* swp;
- if (NextSame) {
- SameRow = NextSame;
- NextSame = 0;
- M++;
- return RC_OK;
- } else if ((rc = TDBDOS::ReadDB(g)) == RC_OK) {
- if (!IsRead() && ((rc = ReadBuffer(g)) != RC_OK))
- // Deferred reading failed
- return rc;
-
- // Recover the memory used for parsing
- PlugSubSet(G->Sarea, G->Sarea_Size);
-
- if ((Row = ParseJson(G, To_Line, strlen(To_Line), &Pretty, &Comma))) {
- Row = FindRow(g);
- SameRow = 0;
- Fpos++;
- M = 1;
- rc = RC_OK;
- } else if (Pretty != 1 || strcmp(To_Line, "]")) {
- strcpy(g->Message, G->Message);
- rc = RC_FX;
- } else
- rc = RC_EF;
+ jsp = (PJSON)To_Line;
+ swp = new(g) SWAP(G, jsp);
+ swp->SwapJson(jsp, false); // Restore pointers from offsets
+ Row = jsp;
+ Row = FindRow(g);
+ SameRow = 0;
+ Fpos++;
+ M = 1;
+ rc = RC_OK;
+ } // endif Pretty
- } // endif ReadDB
+ } // endif ReadDB
- return rc;
- } // end of ReadDB
+ return rc;
+} // end of ReadDB
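In the Pretty < 0 branch above, ReadDB() treats To_Line as a serialized binary JSON tree and relies on a SWAP helper to turn stored offsets back into real pointers before the row is used; that helper is defined elsewhere in the patch series. For illustration only, a minimal sketch of the general offset-to-pointer restore on an invented singly linked layout (Node, restore_pointers and the zero-offset terminator are assumptions of this sketch, not the CONNECT binary format):

  #include <cstdint>

  // Simplified node: Next holds a real pointer in memory, but a byte offset
  // from the buffer start in the serialized (movable) form.
  struct Node {
    int   value;
    Node *next;
  };

  // Walk the chain stored in a relocatable buffer, rewriting each offset
  // into a pointer. An offset of 0 marks the end of the list in this sketch.
  static void restore_pointers(char *base)
  {
    for (Node *n = reinterpret_cast<Node*>(base); n; ) {
      uintptr_t off = reinterpret_cast<uintptr_t>(n->next);
      n->next = off ? reinterpret_cast<Node*>(base + off) : nullptr;
      n = n->next;
    }
  }

The real SWAP class restores a full JSON tree rather than a list, but the principle is the same: the serialized form is position-independent, so the buffer can be written to and read back from disk without re-parsing the text.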
/***********************************************************************/
/* Make the top tree from the object path. */
@@ -1040,7 +1157,7 @@ int TDBJSN::MakeTopTree(PGLOBAL g, PJSON jsp)
val->SetValue(objp);
val = new(g) JVALUE;
- objp->SetValue(g, val, objpath);
+ objp->SetKeyValue(g, val, objpath);
} else {
if (*objpath == '[') {
// Old style
@@ -1062,7 +1179,7 @@ int TDBJSN::MakeTopTree(PGLOBAL g, PJSON jsp)
val = new(g) JVALUE;
i = atoi(objpath) - B;
- arp->SetValue(g, val, i);
+ arp->SetArrayValue(g, val, i);
arp->InitArray(g);
} // endif objpath
@@ -1081,8 +1198,8 @@ int TDBJSN::MakeTopTree(PGLOBAL g, PJSON jsp)
/***********************************************************************/
/* PrepareWriting: Prepare the line for WriteDB. */
/***********************************************************************/
- bool TDBJSN::PrepareWriting(PGLOBAL g)
- {
+bool TDBJSN::PrepareWriting(PGLOBAL g)
+{
PSZ s;
if (MakeTopTree(g, Row))
@@ -1103,7 +1220,7 @@ int TDBJSN::MakeTopTree(PGLOBAL g, PJSON jsp)
} else
return true;
- } // end of PrepareWriting
+} // end of PrepareWriting
/***********************************************************************/
/* WriteDB: Data Base write routine for JSON access method. */
@@ -1117,7 +1234,16 @@ int TDBJSN::WriteDB(PGLOBAL g)
return rc;
} // end of WriteDB
-/* ---------------------------- JSONCOL ------------------------------ */
+/***********************************************************************/
+/* Data Base close routine for JSON access method. */
+/***********************************************************************/
+void TDBJSN::CloseDB(PGLOBAL g)
+{
+ TDBDOS::CloseDB(g);
+ G = PlugExit(G);
+} // end of CloseDB
+
+ /* ---------------------------- JSONCOL ------------------------------ */
/***********************************************************************/
/* JSONCOL public constructor. */
@@ -1125,7 +1251,7 @@ int TDBJSN::WriteDB(PGLOBAL g)
JSONCOL::JSONCOL(PGLOBAL g, PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i)
: DOSCOL(g, cdp, tdbp, cprec, i, "DOS")
{
- Tjp = (TDBJSN *)(tdbp->GetOrig() ? tdbp->GetOrig() : tdbp);
+ Tjp = (TDBJSN *)(tdbp->GetOrig() ? tdbp->GetOrig() : tdbp);
G = Tjp->G;
Jpath = cdp->GetFmt();
MulVal = NULL;
@@ -1135,6 +1261,7 @@ JSONCOL::JSONCOL(PGLOBAL g, PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i)
Xnod = -1;
Xpd = false;
Parsed = false;
+ Warned = false;
} // end of JSONCOL constructor
/***********************************************************************/
@@ -1153,13 +1280,14 @@ JSONCOL::JSONCOL(JSONCOL *col1, PTDB tdbp) : DOSCOL(col1, tdbp)
Xnod = col1->Xnod;
Xpd = col1->Xpd;
Parsed = col1->Parsed;
+ Warned = col1->Warned;
} // end of JSONCOL copy constructor
/***********************************************************************/
/* SetBuffer: prepare a column block for write operation. */
/***********************************************************************/
bool JSONCOL::SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check)
- {
+{
if (DOSCOL::SetBuffer(g, value, ok, check))
return true;
@@ -1170,13 +1298,13 @@ bool JSONCOL::SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check)
Tjp = (TDBJSN*)To_Tdb;
G = Tjp->G;
return false;
- } // end of SetBuffer
+} // end of SetBuffer
/***********************************************************************/
/* Check whether this object is expanded. */
/***********************************************************************/
bool JSONCOL::CheckExpand(PGLOBAL g, int i, PSZ nm, bool b)
- {
+{
if ((Tjp->Xcol && nm && !strcmp(nm, Tjp->Xcol) &&
(Tjp->Xval < 0 || Tjp->Xval == i)) || Xpd) {
Xpd = true; // Expandable object
@@ -1187,7 +1315,7 @@ bool JSONCOL::CheckExpand(PGLOBAL g, int i, PSZ nm, bool b)
} // endif Xcol
return false;
- } // end of CheckExpand
+} // end of CheckExpand
/***********************************************************************/
/* Analyse array processing options. */
@@ -1487,7 +1615,14 @@ PVAL JSONCOL::MakeJson(PGLOBAL g, PJSON jsp)
{
if (Value->IsTypeNum()) {
strcpy(g->Message, "Cannot make Json for a numeric column");
+
+ if (!Warned) {
+ PushWarning(g, Tjp);
+ Warned = true;
+ } // endif Warned
+
Value->Reset();
+#if 0
} else if (Value->GetType() == TYPE_BIN) {
if ((unsigned)Value->GetClen() >= sizeof(BSON)) {
ulong len = Tjp->Lrecl ? Tjp->Lrecl : 500;
@@ -1499,41 +1634,67 @@ PVAL JSONCOL::MakeJson(PGLOBAL g, PJSON jsp)
strcpy(g->Message, "Column size too small");
Value->SetValue_char(NULL, 0);
} // endif Clen
+#endif // 0
} else
Value->SetValue_psz(Serialize(g, jsp, NULL, 0));
return Value;
- } // end of MakeJson
+} // end of MakeJson
/***********************************************************************/
/* SetValue: Set a value from a JVALUE contains. */
/***********************************************************************/
-void JSONCOL::SetJsonValue(PGLOBAL g, PVAL vp, PJVAL val, int n)
- {
- if (val) {
+void JSONCOL::SetJsonValue(PGLOBAL g, PVAL vp, PJVAL jvp)
+{
+ if (jvp) {
vp->SetNull(false);
- switch (val->GetValType()) {
+ switch (jvp->GetValType()) {
case TYPE_STRG:
case TYPE_INTG:
case TYPE_BINT:
case TYPE_DBL:
case TYPE_DTM:
- vp->SetValue_pval(val->GetValue());
+ switch (vp->GetType()) {
+ case TYPE_STRING:
+ case TYPE_DATE:
+ vp->SetValue_psz(jvp->GetString(g));
+ break;
+ case TYPE_INT:
+ case TYPE_SHORT:
+ case TYPE_TINY:
+ vp->SetValue(jvp->GetInteger());
+ break;
+ case TYPE_BIGINT:
+ vp->SetValue(jvp->GetBigint());
+ break;
+ case TYPE_DOUBLE:
+ vp->SetValue(jvp->GetFloat());
+
+ if (jvp->GetValType() == TYPE_DBL)
+ vp->SetPrec(jvp->Nd);
+
+ break;
+ default:
+ sprintf(g->Message, "Unsupported column type %d\n", vp->GetType());
+ throw 888;
+ } // endswitch Type
+
break;
case TYPE_BOOL:
if (vp->IsTypeNum())
- vp->SetValue(val->GetInteger() ? 1 : 0);
+ vp->SetValue(jvp->GetInteger() ? 1 : 0);
else
- vp->SetValue_psz((PSZ)(val->GetInteger() ? "true" : "false"));
+ vp->SetValue_psz((PSZ)(jvp->GetInteger() ? "true" : "false"));
break;
case TYPE_JAR:
- SetJsonValue(g, vp, val->GetArray()->GetValue(0), n);
- break;
+// SetJsonValue(g, vp, val->GetArray()->GetValue(0));
+ vp->SetValue_psz(jvp->GetArray()->GetText(g, NULL));
+ break;
case TYPE_JOB:
// if (!vp->IsTypeNum() || !Strict) {
- vp->SetValue_psz(val->GetObject()->GetText(g, NULL));
+ vp->SetValue_psz(jvp->GetObject()->GetText(g, NULL));
break;
// } // endif Type
@@ -1547,37 +1708,37 @@ void JSONCOL::SetJsonValue(PGLOBAL g, PVAL vp, PJVAL val, int n)
vp->SetNull(true);
} // endif val
- } // end of SetJsonValue
+} // end of SetJsonValue
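The reworked SetJsonValue() above now switches on the target column type and converts the JSON scalar accordingly, rather than funnelling everything through an intermediate VALUE. A toy sketch of that kind of dispatch, with JsonScalar/ColumnValue standing in for PJVAL/PVAL (all names and the reduced type set are illustrative, not CONNECT APIs):

  #include <string>
  #include <stdexcept>

  enum class ColType { String, Int, Double };          // toy column types

  struct JsonScalar {                                   // stand-in for PJVAL
    enum class T { Str, Int, Dbl } type;
    std::string s; long long i = 0; double d = 0; int prec = 0;
  };

  struct ColumnValue {                                  // stand-in for PVAL
    ColType type;
    std::string s; long long i = 0; double d = 0; int prec = 0;
  };

  // Dispatch on the *column* type, converting the JSON scalar as needed,
  // in the spirit of the switch added to SetJsonValue.
  static void set_column(ColumnValue &c, const JsonScalar &v)
  {
    switch (c.type) {
    case ColType::String:
      c.s = (v.type == JsonScalar::T::Str) ? v.s
          : (v.type == JsonScalar::T::Int) ? std::to_string(v.i)
                                           : std::to_string(v.d);
      break;
    case ColType::Int:
      c.i = (v.type == JsonScalar::T::Dbl) ? (long long)v.d : v.i;
      break;
    case ColType::Double:
      c.d = (v.type == JsonScalar::T::Dbl) ? v.d : (double)v.i;
      if (v.type == JsonScalar::T::Dbl)
        c.prec = v.prec;                                // keep decimal precision
      break;
    default:
      throw std::runtime_error("unsupported column type");
    }
  }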
/***********************************************************************/
/* ReadColumn: */
/***********************************************************************/
void JSONCOL::ReadColumn(PGLOBAL g)
- {
+{
if (!Tjp->SameRow || Xnod >= Tjp->SameRow)
Value->SetValue_pval(GetColumnValue(g, Tjp->Row, 0));
- if (Xpd && Value->IsNull() && !((PJDEF)Tjp->To_Def)->Accept)
- throw("Null expandable JSON value");
+// if (Xpd && Value->IsNull() && !((PJDEF)Tjp->To_Def)->Accept)
+// throw("Null expandable JSON value");
// Set null when applicable
if (!Nullable)
Value->SetNull(false);
- } // end of ReadColumn
+} // end of ReadColumn
/***********************************************************************/
/* GetColumnValue: */
/***********************************************************************/
PVAL JSONCOL::GetColumnValue(PGLOBAL g, PJSON row, int i)
- {
+{
int n = Nod - 1;
PJAR arp;
PJVAL val = NULL;
for (; i < Nod && row; i++) {
if (Nodes[i].Op == OP_NUM) {
- Value->SetValue(row->GetType() == TYPE_JAR ? row->size() : 1);
+ Value->SetValue(row->GetType() == TYPE_JAR ? ((PJAR)row)->size() : 1);
return(Value);
} else if (Nodes[i].Op == OP_XX) {
return MakeJson(G, row);
@@ -1591,7 +1752,7 @@ PVAL JSONCOL::GetColumnValue(PGLOBAL g, PJSON row, int i)
val = new(G) JVALUE(row);
} else
- val = ((PJOB)row)->GetValue(Nodes[i].Key);
+ val = ((PJOB)row)->GetKeyValue(Nodes[i].Key);
break;
case TYPE_JAR:
@@ -1599,7 +1760,7 @@ PVAL JSONCOL::GetColumnValue(PGLOBAL g, PJSON row, int i)
if (!Nodes[i].Key) {
if (Nodes[i].Op == OP_EQ)
- val = arp->GetValue(Nodes[i].Rank);
+ val = arp->GetArrayValue(Nodes[i].Rank);
else if (Nodes[i].Op == OP_EXP)
return ExpandArray(g, arp, i);
else
@@ -1607,7 +1768,7 @@ PVAL JSONCOL::GetColumnValue(PGLOBAL g, PJSON row, int i)
} else {
// Unexpected array, unwrap it as [0]
- val = arp->GetValue(0);
+ val = arp->GetArrayValue(0);
i--;
} // endif's
@@ -1625,15 +1786,15 @@ PVAL JSONCOL::GetColumnValue(PGLOBAL g, PJSON row, int i)
} // endfor i
- SetJsonValue(g, Value, val, n);
+ SetJsonValue(g, Value, val);
return Value;
- } // end of GetColumnValue
+} // end of GetColumnValue
/***********************************************************************/
/* ExpandArray: */
/***********************************************************************/
PVAL JSONCOL::ExpandArray(PGLOBAL g, PJAR arp, int n)
- {
+{
int ars = MY_MIN(Tjp->Limit, arp->size());
PJVAL jvp;
JVALUE jval;
@@ -1645,13 +1806,13 @@ PVAL JSONCOL::ExpandArray(PGLOBAL g, PJAR arp, int n)
return Value;
} // endif ars
- if (!(jvp = arp->GetValue((Nodes[n].Rx = Nodes[n].Nx)))) {
+ if (!(jvp = arp->GetArrayValue((Nodes[n].Rx = Nodes[n].Nx)))) {
strcpy(g->Message, "Logical error expanding array");
throw 666;
} // endif jvp
if (n < Nod - 1 && jvp->GetJson()) {
- jval.SetValue(GetColumnValue(g, jvp->GetJson(), n + 1));
+ jval.SetValue(g, GetColumnValue(g, jvp->GetJson(), n + 1));
jvp = &jval;
} // endif n
@@ -1665,15 +1826,15 @@ PVAL JSONCOL::ExpandArray(PGLOBAL g, PJAR arp, int n)
Tjp->NextSame = Xnod;
} // endif NextSame
- SetJsonValue(g, Value, jvp, n);
+ SetJsonValue(g, Value, jvp);
return Value;
- } // end of ExpandArray
+} // end of ExpandArray
/***********************************************************************/
/* CalculateArray: */
/***********************************************************************/
PVAL JSONCOL::CalculateArray(PGLOBAL g, PJAR arp, int n)
- {
+{
int i, ars, nv = 0, nextsame = Tjp->NextSame;
bool err;
OPVAL op = Nodes[n].Op;
@@ -1689,18 +1850,19 @@ PVAL JSONCOL::CalculateArray(PGLOBAL g, PJAR arp, int n)
ars, op, nextsame);
for (i = 0; i < ars; i++) {
- jvrp = arp->GetValue(i);
+ jvrp = arp->GetArrayValue(i);
if (trace(1))
htrc("i=%d nv=%d\n", i, nv);
if (!jvrp->IsNull() || (op == OP_CNC && GetJsonNull())) do {
if (jvrp->IsNull()) {
- jvrp->Value = AllocateValue(g, GetJsonNull(), TYPE_STRING);
+ jvrp->Strp = PlugDup(g, GetJsonNull());
+ jvrp->DataType = TYPE_STRG;
jvp = jvrp;
} else if (n < Nod - 1 && jvrp->GetJson()) {
Tjp->NextSame = nextsame;
- jval.SetValue(GetColumnValue(g, jvrp->GetJson(), n + 1));
+ jval.SetValue(g, GetColumnValue(g, jvrp->GetJson(), n + 1));
jvp = &jval;
} else
jvp = jvrp;
@@ -1710,10 +1872,10 @@ PVAL JSONCOL::CalculateArray(PGLOBAL g, PJAR arp, int n)
jvp->GetString(g), jvp->IsNull() ? 1 : 0);
if (!nv++) {
- SetJsonValue(g, vp, jvp, n);
+ SetJsonValue(g, vp, jvp);
continue;
} else
- SetJsonValue(g, MulVal, jvp, n);
+ SetJsonValue(g, MulVal, jvp);
if (!MulVal->IsNull()) {
switch (op) {
@@ -1768,19 +1930,19 @@ PVAL JSONCOL::CalculateArray(PGLOBAL g, PJAR arp, int n)
Tjp->NextSame = nextsame;
return vp;
- } // end of CalculateArray
+} // end of CalculateArray
/***********************************************************************/
/* GetRow: Get the object containing this column. */
/***********************************************************************/
PJSON JSONCOL::GetRow(PGLOBAL g)
- {
+{
PJVAL val = NULL;
PJAR arp;
PJSON nwr, row = Tjp->Row;
for (int i = 0; i < Nod && row; i++) {
- if (Nodes[i+1].Op == OP_XX)
+ if (i < Nod-1 && Nodes[i+1].Op == OP_XX)
break;
else switch (row->GetType()) {
case TYPE_JOB:
@@ -1788,20 +1950,20 @@ PJSON JSONCOL::GetRow(PGLOBAL g)
// Expected Array was not there, wrap the value
continue;
- val = ((PJOB)row)->GetValue(Nodes[i].Key);
+ val = ((PJOB)row)->GetKeyValue(Nodes[i].Key);
break;
case TYPE_JAR:
arp = (PJAR)row;
if (!Nodes[i].Key) {
if (Nodes[i].Op == OP_EQ)
- val = arp->GetValue(Nodes[i].Rank);
+ val = arp->GetArrayValue(Nodes[i].Rank);
else
- val = arp->GetValue(Nodes[i].Rx);
+ val = arp->GetArrayValue(Nodes[i].Rx);
} else {
// Unexpected array, unwrap it as [0]
- val = arp->GetValue(0);
+ val = arp->GetArrayValue(0);
i--;
} // endif Nodes
@@ -1828,9 +1990,9 @@ PJSON JSONCOL::GetRow(PGLOBAL g)
nwr = new(G) JOBJECT;
if (row->GetType() == TYPE_JOB) {
- ((PJOB)row)->SetValue(G, new(G) JVALUE(nwr), Nodes[i-1].Key);
+ ((PJOB)row)->SetKeyValue(G, new(G) JVALUE(nwr), Nodes[i-1].Key);
} else if (row->GetType() == TYPE_JAR) {
- ((PJAR)row)->AddValue(G, new(G) JVALUE(nwr));
+ ((PJAR)row)->AddArrayValue(G, new(G) JVALUE(nwr));
((PJAR)row)->InitArray(G);
} else {
strcpy(g->Message, "Wrong type when writing new row");
@@ -1846,13 +2008,13 @@ PJSON JSONCOL::GetRow(PGLOBAL g)
} // endfor i
return row;
- } // end of GetRow
+} // end of GetRow
/***********************************************************************/
/* WriteColumn: */
/***********************************************************************/
void JSONCOL::WriteColumn(PGLOBAL g)
- {
+{
if (Xpd && Tjp->Pretty < 2) {
strcpy(g->Message, "Cannot write expanded column when Pretty is not 2");
throw 666;
@@ -1888,21 +2050,21 @@ void JSONCOL::WriteColumn(PGLOBAL g)
if (Nodes[Nod-1].Op == OP_XX) {
s = Value->GetCharValue();
- if (!(jsp = ParseJson(G, s, (int)strlen(s)))) {
+ if (!(jsp = ParseJson(G, s, strlen(s)))) {
strcpy(g->Message, s);
throw 666;
} // endif jsp
if (arp) {
if (Nod > 1 && Nodes[Nod-2].Op == OP_EQ)
- arp->SetValue(G, new(G) JVALUE(jsp), Nodes[Nod-2].Rank);
+ arp->SetArrayValue(G, new(G) JVALUE(jsp), Nodes[Nod-2].Rank);
else
- arp->AddValue(G, new(G) JVALUE(jsp));
+ arp->AddArrayValue(G, new(G) JVALUE(jsp));
arp->InitArray(G);
} else if (objp) {
if (Nod > 1 && Nodes[Nod-2].Key)
- objp->SetValue(G, new(G) JVALUE(jsp), Nodes[Nod-2].Key);
+ objp->SetKeyValue(G, new(G) JVALUE(jsp), Nodes[Nod-2].Key);
} else if (jvp)
jvp->SetValue(jsp);
@@ -1919,24 +2081,24 @@ void JSONCOL::WriteColumn(PGLOBAL g)
case TYPE_DOUBLE:
if (arp) {
if (Nodes[Nod-1].Op == OP_EQ)
- arp->SetValue(G, new(G) JVALUE(G, Value), Nodes[Nod-1].Rank);
+ arp->SetArrayValue(G, new(G) JVALUE(G, Value), Nodes[Nod-1].Rank);
else
- arp->AddValue(G, new(G) JVALUE(G, Value));
+ arp->AddArrayValue(G, new(G) JVALUE(G, Value));
arp->InitArray(G);
} else if (objp) {
if (Nodes[Nod-1].Key)
- objp->SetValue(G, new(G) JVALUE(G, Value), Nodes[Nod-1].Key);
+ objp->SetKeyValue(G, new(G) JVALUE(G, Value), Nodes[Nod-1].Key);
} else if (jvp)
- jvp->SetValue(Value);
+ jvp->SetValue(g, Value);
break;
default: // ??????????
sprintf(g->Message, "Invalid column type %d", Buf_Type);
} // endswitch Type
- } // end of WriteColumn
+} // end of WriteColumn
/* -------------------------- Class TDBJSON -------------------------- */
@@ -1944,23 +2106,23 @@ void JSONCOL::WriteColumn(PGLOBAL g)
/* Implementation of the TDBJSON class. */
/***********************************************************************/
TDBJSON::TDBJSON(PJDEF tdp, PTXF txfp) : TDBJSN(tdp, txfp)
- {
+{
Doc = NULL;
Multiple = tdp->Multiple;
Done = Changed = false;
- } // end of TDBJSON standard constructor
+} // end of TDBJSON standard constructor
TDBJSON::TDBJSON(PJTDB tdbp) : TDBJSN(tdbp)
- {
+{
Doc = tdbp->Doc;
Multiple = tdbp->Multiple;
Done = tdbp->Done;
Changed = tdbp->Changed;
- } // end of TDBJSON copy constructor
+} // end of TDBJSON copy constructor
// Used for update
PTDB TDBJSON::Clone(PTABS t)
- {
+{
PTDB tp;
PJCOL cp1, cp2;
PGLOBAL g = t->G;
@@ -1973,13 +2135,13 @@ PTDB TDBJSON::Clone(PTABS t)
} // endfor cp1
return tp;
- } // end of Clone
+} // end of Clone
/***********************************************************************/
/* Make the document tree from the object path. */
/***********************************************************************/
int TDBJSON::MakeNewDoc(PGLOBAL g)
- {
+{
// Create a void table that will be populated
Doc = new(g) JARRAY;
@@ -1988,15 +2150,16 @@ int TDBJSON::MakeNewDoc(PGLOBAL g)
Done = true;
return RC_OK;
- } // end of MakeNewDoc
+} // end of MakeNewDoc
/***********************************************************************/
/* Make the document tree from a file. */
/***********************************************************************/
int TDBJSON::MakeDocument(PGLOBAL g)
- {
+{
char *p, *p1, *p2, *memory, *objpath, *key = NULL;
- int len, i = 0;
+ int i = 0;
+ size_t len;
my_bool a;
MODE mode = Mode;
PJSON jsp;
@@ -2075,7 +2238,7 @@ int TDBJSON::MakeDocument(PGLOBAL g)
key = p;
objp = jsp->GetObject();
arp = NULL;
- val = objp->GetValue(key);
+ val = objp->GetKeyValue(key);
if (!val || !(jsp = val->GetJson())) {
sprintf(g->Message, "Cannot find object key %s", key);
@@ -2101,7 +2264,7 @@ int TDBJSON::MakeDocument(PGLOBAL g)
arp = jsp->GetArray();
objp = NULL;
i = atoi(p) - B;
- val = arp->GetValue(i);
+ val = arp->GetArrayValue(i);
if (!val) {
sprintf(g->Message, "Cannot find array value %d", i);
@@ -2122,17 +2285,17 @@ int TDBJSON::MakeDocument(PGLOBAL g)
Doc = new(g) JARRAY;
if (val) {
- Doc->AddValue(g, val);
+ Doc->AddArrayValue(g, val);
Doc->InitArray(g);
} else if (jsp) {
- Doc->AddValue(g, new(g) JVALUE(jsp));
+ Doc->AddArrayValue(g, new(g) JVALUE(jsp));
Doc->InitArray(g);
} // endif val
if (objp)
- objp->SetValue(g, new(g) JVALUE(Doc), key);
+ objp->SetKeyValue(g, new(g) JVALUE(Doc), key);
else if (arp)
- arp->SetValue(g, new(g) JVALUE(Doc), i);
+ arp->SetArrayValue(g, new(g) JVALUE(Doc), i);
else
Top = Doc;
@@ -2140,13 +2303,13 @@ int TDBJSON::MakeDocument(PGLOBAL g)
Done = true;
return RC_OK;
- } // end of MakeDocument
+} // end of MakeDocument
/***********************************************************************/
/* JSON Cardinality: returns table size in number of rows. */
/***********************************************************************/
int TDBJSON::Cardinality(PGLOBAL g)
- {
+{
if (!g)
return (Xcol || Multiple) ? 0 : 1;
else if (Cardinal < 0)
@@ -2159,48 +2322,48 @@ int TDBJSON::Cardinality(PGLOBAL g)
return 10;
}
return Cardinal;
- } // end of Cardinality
+} // end of Cardinality
/***********************************************************************/
/* JSON GetMaxSize: returns table size estimate in number of rows. */
/***********************************************************************/
int TDBJSON::GetMaxSize(PGLOBAL g)
- {
+{
if (MaxSize < 0)
MaxSize = Cardinality(g) * ((Xcol) ? Limit : 1);
return MaxSize;
- } // end of GetMaxSize
+} // end of GetMaxSize
/***********************************************************************/
/* ResetSize: call by TDBMUL when calculating size estimate. */
/***********************************************************************/
void TDBJSON::ResetSize(void)
- {
+{
MaxSize = Cardinal = -1;
Fpos = -1;
N = 0;
Done = false;
- } // end of ResetSize
+} // end of ResetSize
/***********************************************************************/
/* TDBJSON is not indexable. */
/***********************************************************************/
int TDBJSON::MakeIndex(PGLOBAL g, PIXDEF pxdf, bool)
- {
+{
if (pxdf) {
strcpy(g->Message, "JSON not indexable when pretty = 2");
return RC_FX;
} else
return RC_OK;
- } // end of MakeIndex
+} // end of MakeIndex
/***********************************************************************/
/* Return the position in the table. */
/***********************************************************************/
int TDBJSON::GetRecpos(void)
- {
+{
#if 0
union {
uint Rpos;
@@ -2212,13 +2375,13 @@ int TDBJSON::GetRecpos(void)
return Rpos;
#endif // 0
return Fpos;
- } // end of GetRecpos
+} // end of GetRecpos
/***********************************************************************/
/* Set the position in the table. */
/***********************************************************************/
bool TDBJSON::SetRecpos(PGLOBAL, int recpos)
- {
+{
#if 0
union {
uint Rpos;
@@ -2239,13 +2402,13 @@ bool TDBJSON::SetRecpos(PGLOBAL, int recpos)
Fpos = recpos - 1;
return false;
- } // end of SetRecpos
+} // end of SetRecpos
/***********************************************************************/
/* JSON Access Method opening routine. */
/***********************************************************************/
bool TDBJSON::OpenDB(PGLOBAL g)
- {
+{
if (Use == USE_OPEN) {
/*******************************************************************/
/* Table already open replace it at its beginning. */
@@ -2277,13 +2440,13 @@ bool TDBJSON::OpenDB(PGLOBAL g)
Use = USE_OPEN;
return false;
- } // end of OpenDB
+} // end of OpenDB
/***********************************************************************/
/* ReadDB: Data Base read routine for JSON access method. */
/***********************************************************************/
int TDBJSON::ReadDB(PGLOBAL)
- {
+{
int rc;
N++;
@@ -2294,61 +2457,61 @@ int TDBJSON::ReadDB(PGLOBAL)
M++;
rc = RC_OK;
} else if (++Fpos < (signed)Doc->size()) {
- Row = Doc->GetValue(Fpos);
+ Row = Doc->GetArrayValue(Fpos);
if (Row->GetType() == TYPE_JVAL)
Row = ((PJVAL)Row)->GetJson();
SameRow = 0;
M = 1;
- rc = RC_OK;
+ rc = RC_OK;
} else
rc = RC_EF;
return rc;
- } // end of ReadDB
+} // end of ReadDB
/***********************************************************************/
/* WriteDB: Data Base write routine for JSON access method. */
/***********************************************************************/
int TDBJSON::WriteDB(PGLOBAL g)
- {
+{
if (Jmode == MODE_OBJECT) {
PJVAL vp = new(g) JVALUE(Row);
if (Mode == MODE_INSERT) {
- Doc->AddValue(g, vp);
+ Doc->AddArrayValue(g, vp);
Row = new(g) JOBJECT;
- } else if (Doc->SetValue(g, vp, Fpos))
+ } else if (Doc->SetArrayValue(g, vp, Fpos))
return RC_FX;
} else if (Jmode == MODE_ARRAY) {
PJVAL vp = new(g) JVALUE(Row);
if (Mode == MODE_INSERT) {
- Doc->AddValue(g, vp);
+ Doc->AddArrayValue(g, vp);
Row = new(g) JARRAY;
- } else if (Doc->SetValue(g, vp, Fpos))
+ } else if (Doc->SetArrayValue(g, vp, Fpos))
return RC_FX;
} else { // if (Jmode == MODE_VALUE)
if (Mode == MODE_INSERT) {
- Doc->AddValue(g, (PJVAL)Row);
+ Doc->AddArrayValue(g, (PJVAL)Row);
Row = new(g) JVALUE;
- } else if (Doc->SetValue(g, (PJVAL)Row, Fpos))
+ } else if (Doc->SetArrayValue(g, (PJVAL)Row, Fpos))
return RC_FX;
} // endif Jmode
Changed = true;
return RC_OK;
- } // end of WriteDB
+} // end of WriteDB
/***********************************************************************/
/* Data Base delete line routine for JSON access method. */
/***********************************************************************/
int TDBJSON::DeleteDB(PGLOBAL g, int irc)
- {
+{
if (irc == RC_OK) {
// Deleted current row
if (Doc->DeleteValue(Fpos)) {
@@ -2365,13 +2528,13 @@ int TDBJSON::DeleteDB(PGLOBAL g, int irc)
} // endfor i
return RC_OK;
- } // end of DeleteDB
+} // end of DeleteDB
/***********************************************************************/
/* Data Base close routine for JSON access methods. */
/***********************************************************************/
void TDBJSON::CloseDB(PGLOBAL g)
- {
+{
if (!Changed)
return;
@@ -2387,7 +2550,7 @@ void TDBJSON::CloseDB(PGLOBAL g)
if (!Serialize(g, Top, filename, Pretty))
puts(g->Message);
- } // end of CloseDB
+} // end of CloseDB
/* ---------------------------TDBJCL class --------------------------- */
@@ -2395,18 +2558,18 @@ void TDBJSON::CloseDB(PGLOBAL g)
/* TDBJCL class constructor. */
/***********************************************************************/
TDBJCL::TDBJCL(PJDEF tdp) : TDBCAT(tdp)
- {
+{
Topt = tdp->GetTopt();
Db = tdp->Schema;
Dsn = tdp->Uri;
- } // end of TDBJCL constructor
+} // end of TDBJCL constructor
/***********************************************************************/
/* GetResult: Get the list the JSON file columns. */
/***********************************************************************/
PQRYRES TDBJCL::GetResult(PGLOBAL g)
- {
+{
return JSONColumns(g, Db, Dsn, Topt, false);
- } // end of GetResult
+} // end of GetResult
/* --------------------------- End of json --------------------------- */
diff --git a/storage/connect/tabjson.h b/storage/connect/tabjson.h
index 88aa5e2ee8b..b47dc9b0665 100644
--- a/storage/connect/tabjson.h
+++ b/storage/connect/tabjson.h
@@ -5,6 +5,7 @@
/* */
/* This file contains the JSON classes declares. */
/***********************************************************************/
+#pragma once
//#include "osutil.h" // Unuseful and bad for OEM
#include "block.h"
#include "colblk.h"
@@ -35,7 +36,7 @@ typedef struct _jncol {
struct _jncol *Next;
char *Name;
char *Fmt;
- int Type;
+ JTYP Type;
int Len;
int Scale;
bool Cbn;
@@ -58,7 +59,7 @@ public:
// Members
JCOL jcol;
PJCL jcp, fjcp, pjcp;
- PVAL valp;
+//PVL vlp;
PJDEF tdp;
TDBJSN *tjnp;
PJTDB tjsp;
@@ -68,7 +69,7 @@ public:
PCSZ sep;
char colname[65], fmt[129], buf[16];
uint *length;
- int i, n, bf, ncol, lvl, sz;
+ int i, n, bf, ncol, lvl, sz, limit;
bool all, strfy;
}; // end of JSONDISC
@@ -126,6 +127,7 @@ public:
class DllExport TDBJSN : public TDBDOS {
friend class JSONCOL;
friend class JSONDEF;
+ friend class JSONDISC;
#if defined(CMGO_SUPPORT)
friend class CMGFAM;
#endif // CMGO_SUPPORT
@@ -154,14 +156,18 @@ public:
{return Txfp->GetAmType() == TYPE_AM_MGO || !Xcol;}
// Database routines
- virtual int Cardinality(PGLOBAL g);
- virtual int GetMaxSize(PGLOBAL g);
+ //virtual int Cardinality(PGLOBAL g);
+ //virtual int GetMaxSize(PGLOBAL g);
virtual bool OpenDB(PGLOBAL g);
virtual int ReadDB(PGLOBAL g);
virtual bool PrepareWriting(PGLOBAL g);
virtual int WriteDB(PGLOBAL g);
+ virtual void CloseDB(PGLOBAL g);
- protected:
+ // Specific routine
+ virtual int EstimatedLength(void);
+
+protected:
PJSON FindRow(PGLOBAL g);
int MakeTopTree(PGLOBAL g, PJSON jsp);
@@ -169,7 +175,7 @@ public:
PGLOBAL G; // Support of parse memory
PJSON Top; // The top JSON tree
PJSON Row; // The current row
- PJSON Val; // The value of the current row
+ PJVAL Val; // The value of the current row
PJCOL Colp; // The multiple column
JMODE Jmode; // MODE_OBJECT by default
PCSZ Objname; // The table object name
@@ -186,7 +192,8 @@ public:
char Sep; // The Jpath separator
bool Strict; // Strict syntax checking
bool Comma; // Row has final comma
- }; // end of class TDBJSN
+ bool Xpdable; // False: expandable columns are NULL
+}; // end of class TDBJSN
/* -------------------------- JSONCOL class -------------------------- */
@@ -224,8 +231,8 @@ public:
PVAL ExpandArray(PGLOBAL g, PJAR arp, int n);
PVAL CalculateArray(PGLOBAL g, PJAR arp, int n);
PVAL MakeJson(PGLOBAL g, PJSON jsp);
- void SetJsonValue(PGLOBAL g, PVAL vp, PJVAL val, int n);
- PJSON GetRow(PGLOBAL g);
+ void SetJsonValue(PGLOBAL g, PVAL vp, PJVAL val);
+ PJSON GetRow(PGLOBAL g);
// Default constructor not to be used
JSONCOL(void) {}
@@ -241,7 +248,8 @@ public:
char Sep; // The Jpath separator
bool Xpd; // True for expandable column
bool Parsed; // True when parsed
- }; // end of class JSONCOL
+ bool Warned; // True when warning issued
+}; // end of class JSONCOL
/* -------------------------- TDBJSON class -------------------------- */
diff --git a/storage/connect/tabrest.cpp b/storage/connect/tabrest.cpp
index b1bdeffc880..1efda6e3bca 100644
--- a/storage/connect/tabrest.cpp
+++ b/storage/connect/tabrest.cpp
@@ -1,8 +1,11 @@
/************** tabrest C++ Program Source Code File (.CPP) ************/
-/* PROGRAM NAME: tabrest Version 1.7 */
-/* (C) Copyright to the author Olivier BERTRAND 2018 - 2019 */
+/* PROGRAM NAME: tabrest Version 1.8 */
+/* (C) Copyright to the author Olivier BERTRAND 2018 - 2020 */
/* This program is the REST Web API support for MariaDB. */
/* When compiled without MARIADB defined, it is the EOM module code. */
+/* The way Connect handles NOSQL data returned by REST queries is    */
+/* to retrieve it as a file and then let the existing data type      */
+/* tables (JSON, XML or CSV) process it as usual.                    */
/***********************************************************************/
/***********************************************************************/
@@ -10,6 +13,8 @@
/***********************************************************************/
#if defined(MARIADB)
#include <my_global.h> // All MariaDB stuff
+#include <mysqld.h>
+#include <sql_error.h>
#else // !MARIADB OEM module
#include "mini-global.h"
#define _MAX_PATH 260
@@ -42,7 +47,19 @@
#include "tabfmt.h"
#include "tabrest.h"
+#if defined(connect_EXPORTS)
+#define PUSH_WARNING(M) push_warning(current_thd, Sql_condition::WARN_LEVEL_WARN, 0, M)
+#else
+#define PUSH_WARNING(M) htrc(M)
+#endif
+
+#if defined(__WIN__) || defined(_WINDOWS)
+#define popen _popen
+#define pclose _pclose
+#endif
+
static XGETREST getRestFnc = NULL;
+static int Xcurl(PGLOBAL g, PCSZ Http, PCSZ Uri, PCSZ filename);
#if !defined(MARIADB)
/***********************************************************************/
@@ -72,7 +89,41 @@ PTABDEF __stdcall GetREST(PGLOBAL g, void *memp)
#endif // !MARIADB
/***********************************************************************/
-/* GetREST: get the external TABDEF from OEM module. */
+/* Xcurl: retrieve the REST answer by executing cURL. */
+/***********************************************************************/
+int Xcurl(PGLOBAL g, PCSZ Http, PCSZ Uri, PCSZ filename)
+{
+ char buf[1024];
+ int rc;
+ FILE *pipe;
+
+ if (Uri) {
+ if (*Uri == '/' || Http[strlen(Http) - 1] == '/')
+ sprintf(buf, "curl %s%s -o %s", Http, Uri, filename);
+ else
+ sprintf(buf, "curl %s/%s -o %s", Http, Uri, filename);
+
+ } else
+ sprintf(buf, "curl %s -o %s", Http, filename);
+
+ if ((pipe = popen(buf, "rt"))) {
+ if (trace(515))
+ while (fgets(buf, sizeof(buf), pipe)) {
+ htrc("%s", buf);
+ } // endwhile
+
+ pclose(pipe);
+ rc = 0;
+ } else {
+ sprintf(g->Message, "curl failed, errno =%d", errno);
+ rc = 1;
+ } // endif pipe
+
+ return rc;
+} // end of Xcurl
+
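Xcurl() above simply shells out to the curl binary through popen() and drains its output when tracing is on. A self-contained sketch of the same pattern, assuming a POSIX popen() (the patch maps popen/pclose to _popen/_pclose on Windows); fetch_with_curl is an invented name, and unlike Xcurl this variant also folds curl's exit status into the return code:

  #include <cstdio>
  #include <string>

  // Fetch a URL into a local file by running "curl <url> -o <outfile>".
  // The URL is assumed trusted, as it is in the patch.
  static int fetch_with_curl(const std::string &url, const std::string &outfile)
  {
    std::string cmd = "curl " + url + " -o " + outfile;

    if (FILE *pipe = popen(cmd.c_str(), "r")) {         // "rt" in the patch is Windows-style
      char buf[1024];
      while (fgets(buf, sizeof buf, pipe))              // drain (or trace) curl's output
        ;
      return pclose(pipe) == 0 ? 0 : 1;
    }
    return 1;                                           // popen failed
  }

  // Hypothetical usage:
  //   fetch_with_curl("http://example.com/data.json", "data.json");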
+/***********************************************************************/
+/* GetRestFunction: load the Rest lib and get the Rest function. */
/***********************************************************************/
XGETREST GetRestFunction(PGLOBAL g)
{
@@ -148,13 +199,15 @@ PQRYRES RESTColumns(PGLOBAL g, PTOS tp, char *tab, char *db, bool info)
PQRYRES __stdcall ColREST(PGLOBAL g, PTOS tp, char *tab, char *db, bool info)
#endif // !MARIADB
{
- PQRYRES qrp= NULL;
- char filename[_MAX_PATH + 1]; // MAX PATH ???
- PCSZ http, uri, fn, ftype;
+ PQRYRES qrp= NULL;
+ char filename[_MAX_PATH + 1]; // MAX PATH ???
+ int rc;
+ bool curl = false;
+ PCSZ http, uri, fn, ftype;
XGETREST grf = GetRestFunction(g);
if (!grf)
- return NULL;
+ curl = true;
http = GetStringTableOption(g, tp, "Http", NULL);
uri = GetStringTableOption(g, tp, "Uri", NULL);
@@ -178,17 +231,27 @@ PQRYRES __stdcall ColREST(PGLOBAL g, PTOS tp, char *tab, char *db, bool info)
fn = filename;
tp->filename = PlugDup(g, fn);
+ sprintf(g->Message, "No file name. Table will use %s", fn);
+ PUSH_WARNING(g->Message);
} // endif fn
// We used the file name relative to recorded datapath
PlugSetPath(filename, fn, db);
- //strcat(strcat(strcat(strcpy(filename, "."), slash), db), slash);
- //strncat(filename, fn, _MAX_PATH - strlen(filename));
+ curl = GetBooleanTableOption(g, tp, "Curl", curl);
// Retrieve the file from the web and copy it locally
- if (http && grf(g->Message, trace(515), http, uri, filename)) {
- // sprintf(g->Message, "Failed to get file at %s", http);
- } else if (!stricmp(ftype, "JSON"))
+ if (curl)
+ rc = Xcurl(g, http, uri, filename);
+ else if (grf)
+ rc = grf(g->Message, trace(515), http, uri, filename);
+ else {
+    strcpy(g->Message, "Cannot access curl nor casablanca");
+ rc = 1;
+ } // endif !grf
+
+ if (rc)
+ return NULL;
+ else if (!stricmp(ftype, "JSON"))
qrp = JSONColumns(g, db, NULL, tp, info);
else if (!stricmp(ftype, "CSV"))
qrp = CSVColumns(g, NULL, tp, info);
@@ -209,14 +272,14 @@ PQRYRES __stdcall ColREST(PGLOBAL g, PTOS tp, char *tab, char *db, bool info)
/***********************************************************************/
bool RESTDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff)
{
- char filename[_MAX_PATH + 1];
- int rc = 0, n;
- bool xt = trace(515);
- LPCSTR ftype;
+ char filename[_MAX_PATH + 1];
+ int rc = 0, n;
+ bool curl = false, xt = trace(515);
+ LPCSTR ftype;
XGETREST grf = GetRestFunction(g);
if (!grf)
- return true;
+ curl = true;
#if defined(MARIADB)
ftype = GetStringCatInfo(g, "Type", "JSON");
@@ -235,8 +298,8 @@ bool RESTDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff)
: (!stricmp(ftype, "CSV")) ? 3 : 0;
if (n == 0) {
- htrc("DefineAM: Unsupported REST table type %s", am);
- sprintf(g->Message, "Unsupported REST table type %s", am);
+ htrc("DefineAM: Unsupported REST table type %s\n", ftype);
+ sprintf(g->Message, "Unsupported REST table type %s", ftype);
return true;
} // endif n
@@ -247,11 +310,19 @@ bool RESTDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff)
// We used the file name relative to recorded datapath
PlugSetPath(filename, Fn, GetPath());
- // Retrieve the file from the web and copy it locally
- rc = grf(g->Message, xt, Http, Uri, filename);
+ curl = GetBoolCatInfo("Curl", curl);
- if (xt)
- htrc("Return from restGetFile: rc=%d\n", rc);
+ // Retrieve the file from the web and copy it locally
+ if (curl) {
+ rc = Xcurl(g, Http, Uri, filename);
+ xtrc(515, "Return from Xcurl: rc=%d\n", rc);
+ } else if (grf) {
+ rc = grf(g->Message, xt, Http, Uri, filename);
+ xtrc(515, "Return from restGetFile: rc=%d\n", rc);
+ } else {
+    strcpy(g->Message, "Cannot access curl nor casablanca");
+ rc = 1;
+ } // endif !grf
if (rc)
return true;
diff --git a/storage/connect/tabrest.h b/storage/connect/tabrest.h
index f08ac7984c9..9cf2d10a6b8 100644
--- a/storage/connect/tabrest.h
+++ b/storage/connect/tabrest.h
@@ -5,7 +5,10 @@
/***********************************************************************/
#pragma once
-#ifndef __WIN__
+#if defined(__WIN__)
+static PCSZ slash = "\\";
+#else // !__WIN__
+static PCSZ slash = "/";
#define stricmp strcasecmp
#endif // !__WIN__
diff --git a/storage/connect/tabutil.cpp b/storage/connect/tabutil.cpp
index f5a105a530d..0a91f36afa7 100644
--- a/storage/connect/tabutil.cpp
+++ b/storage/connect/tabutil.cpp
@@ -708,7 +708,7 @@ bool PRXCOL::Init(PGLOBAL g, PTDB tp)
MODE mode = To_Tdb->GetMode();
// Needed for MYSQL subtables
- ((XCOLBLK*)Colp)->Name = Decode(g, Colp->GetName());
+ ((COLBLK*)Colp)->SetName(Decode(g, Colp->GetName()));
// May not have been done elsewhere
Colp->InitValue(g);
diff --git a/storage/connect/tabvir.cpp b/storage/connect/tabvir.cpp
index 76d52e198e3..2fdb7f64744 100644
--- a/storage/connect/tabvir.cpp
+++ b/storage/connect/tabvir.cpp
@@ -168,17 +168,16 @@ int TDBVIR::TestFilter(PFIL filp, bool nop)
} // endswitch op
if (!nop) switch (op) {
- case OP_LT: l1--;
- /* falls through */
- case OP_LE: limit = l1; break;
- default: ok = false;
- } // endswitch op
+ case OP_LT: l1--; /* fall through */
+ case OP_LE: limit = l1; break;
+ default: ok = false;
+ } // endswitch op
+
else switch (op) {
- case OP_GE: l1--;
- /* falls through */
- case OP_GT: limit = l1; break;
- default: ok = false;
- } // endswitch op
+ case OP_GE: l1--; /* fall through */
+ case OP_GT: limit = l1; break;
+ default: ok = false;
+ } // endswitch op
limit = MY_MIN(MY_MAX(0, limit), Size);
diff --git a/storage/connect/user_connect.cc b/storage/connect/user_connect.cc
index c8f38b68015..5268651d080 100644
--- a/storage/connect/user_connect.cc
+++ b/storage/connect/user_connect.cc
@@ -112,8 +112,7 @@ bool user_connect::user_init()
if (g)
printf("%s\n", g->Message);
- int rc __attribute__((unused))= PlugExit(g);
- g= NULL;
+ g= PlugExit(g);
if (dup)
free(dup);
diff --git a/storage/connect/value.cpp b/storage/connect/value.cpp
index 5951b26e81e..412cb808936 100644
--- a/storage/connect/value.cpp
+++ b/storage/connect/value.cpp
@@ -197,7 +197,7 @@ const char *GetFormatType(int type)
case TYPE_DOUBLE: c = "F"; break;
case TYPE_DATE: c = "D"; break;
case TYPE_TINY: c = "T"; break;
- case TYPE_DECIM: c = "M"; break;
+ case TYPE_DECIM: c = "F"; break;
case TYPE_BIN: c = "B"; break;
case TYPE_PCHAR: c = "P"; break;
} // endswitch type
@@ -380,8 +380,8 @@ PVAL AllocateValue(PGLOBAL g, int type, int len, int prec,
case TYPE_STRING:
valp = new(g) TYPVAL<PSZ>(g, (PSZ)NULL, len, prec);
break;
- case TYPE_DATE:
- valp = new(g) DTVAL(g, len, prec, fmt);
+ case TYPE_DATE:
+ valp = new(g) DTVAL(g, len, prec, fmt);
break;
case TYPE_INT:
if (uns)
diff --git a/storage/connect/value.h b/storage/connect/value.h
index ee7a1c8032f..df6a55501b6 100644
--- a/storage/connect/value.h
+++ b/storage/connect/value.h
@@ -65,7 +65,8 @@ DllExport BYTE OpBmp(PGLOBAL g, OPVAL opc);
/***********************************************************************/
class DllExport VALUE : public BLOCK {
friend class CONSTANT; // The only object allowed to use SetConstFormat
- public:
+  friend class SWAP;  // The only class allowed to access protected members
+public:
// Constructors
// Implementation
@@ -260,7 +261,8 @@ class DllExport TYPVAL : public VALUE {
/***********************************************************************/
template <>
class DllExport TYPVAL<PSZ>: public VALUE {
- public:
+  friend class SWAP;  // The only class allowed to offset Strg
+public:
// Constructors
TYPVAL(PSZ s, short c = 0);
TYPVAL(PGLOBAL g, PSZ s, int n, int c);
@@ -346,7 +348,8 @@ class DllExport DECVAL: public TYPVAL<PSZ> {
/* Specific BINARY class. */
/***********************************************************************/
class DllExport BINVAL: public VALUE {
- public:
+  friend class SWAP;  // The only class allowed to offset pointers
+public:
// Constructors
//BINVAL(void *p);
BINVAL(PGLOBAL g, void *p, int cl, int n);
@@ -415,7 +418,8 @@ class DllExport DTVAL : public TYPVAL<int> {
virtual bool SetValue_char(const char *p, int n);
virtual void SetValue_psz(PCSZ s);
virtual void SetValue_pvblk(PVBLK blk, int n);
- virtual char *GetCharString(char *p);
+ virtual PSZ GetCharValue(void) { return Sdate; }
+ virtual char *GetCharString(char *p);
virtual int ShowValue(char *buf, int len);
virtual bool FormatValue(PVAL vp, PCSZ fmt);
bool SetFormat(PGLOBAL g, PCSZ fmt, int len, int year = 0);
diff --git a/storage/connect/xobject.h b/storage/connect/xobject.h
index bc5912d3054..5b50e9320f5 100644
--- a/storage/connect/xobject.h
+++ b/storage/connect/xobject.h
@@ -130,6 +130,7 @@ class DllExport STRING : public BLOCK {
inline void SetLength(uint n) {Length = n;}
inline PSZ GetStr(void) {return Strp;}
inline uint32 GetSize(void) {return Size;}
+ inline char GetLastChar(void) {return Length ? Strp[Length - 1] : 0;}
inline bool IsTruncated(void) {return Trc;}
// Methods
diff --git a/storage/csv/ha_tina.cc b/storage/csv/ha_tina.cc
index badb515c2b2..6e7ee48d2eb 100644
--- a/storage/csv/ha_tina.cc
+++ b/storage/csv/ha_tina.cc
@@ -528,7 +528,7 @@ int ha_tina::encode_quote(const uchar *buf)
String attribute(attribute_buffer, sizeof(attribute_buffer),
&my_charset_bin);
bool ietf_quotes= table_share->option_struct->ietf_quotes;
- my_bitmap_map *org_bitmap= dbug_tmp_use_all_columns(table, table->read_set);
+ MY_BITMAP *org_bitmap= dbug_tmp_use_all_columns(table, &table->read_set);
buffer.length(0);
for (Field **field=table->field ; *field ; field++)
@@ -606,7 +606,7 @@ int ha_tina::encode_quote(const uchar *buf)
//buffer.replace(buffer.length(), 0, "\n", 1);
- dbug_tmp_restore_column_map(table->read_set, org_bitmap);
+ dbug_tmp_restore_column_map(&table->read_set, org_bitmap);
return (buffer.length());
}
@@ -659,7 +659,6 @@ int ha_tina::find_current_row(uchar *buf)
{
my_off_t end_offset, curr_offset= current_position;
int eoln_len;
- my_bitmap_map *org_bitmap;
int error;
bool read_all;
bool ietf_quotes= table_share->option_struct->ietf_quotes;
@@ -679,7 +678,7 @@ int ha_tina::find_current_row(uchar *buf)
/* We must read all columns in case a table is opened for update */
read_all= !bitmap_is_clear_all(table->write_set);
/* Avoid asserts in ::store() for columns that are not going to be updated */
- org_bitmap= dbug_tmp_use_all_columns(table, table->write_set);
+ MY_BITMAP *org_bitmap= dbug_tmp_use_all_columns(table, &table->write_set);
error= HA_ERR_CRASHED_ON_USAGE;
memset(buf, 0, table->s->null_bytes);
@@ -857,7 +856,7 @@ int ha_tina::find_current_row(uchar *buf)
error= 0;
err:
- dbug_tmp_restore_column_map(table->write_set, org_bitmap);
+ dbug_tmp_restore_column_map(&table->write_set, org_bitmap);
DBUG_RETURN(error);
}
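The ha_tina hunks above, like the federated ones further down, all apply one mechanical API change: dbug_tmp_use_all_columns() now takes the address of the bitmap pointer and hands back the previous MY_BITMAP* for the later restore. A reduced model of that save/switch/restore idiom with invented Bitmap/Table types (not the server's MY_BITMAP API):

  // The real code uses TABLE::read_set/write_set, MY_BITMAP and
  // dbug_tmp_use_all_columns()/dbug_tmp_restore_column_map().
  struct Bitmap { bool all_columns; };

  struct Table {
    Bitmap normal_set { false };
    Bitmap full_set   { true  };
    Bitmap *read_set = &normal_set;      // what the storage engine consults
  };

  // Point the table at an "every column readable" bitmap, return the old one.
  static Bitmap *use_all_columns(Bitmap **set, Bitmap *full)
  {
    Bitmap *old = *set;
    *set = full;
    return old;
  }

  static void restore_column_map(Bitmap **set, Bitmap *old) { *set = old; }

  // Call-site shape matching the patch:
  //   Bitmap *org = use_all_columns(&table.read_set, &table.full_set);
  //   ... read or store every field ...
  //   restore_column_map(&table.read_set, org);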
diff --git a/storage/federated/ha_federated.cc b/storage/federated/ha_federated.cc
index ec34cf16858..00407730c31 100644
--- a/storage/federated/ha_federated.cc
+++ b/storage/federated/ha_federated.cc
@@ -936,7 +936,7 @@ uint ha_federated::convert_row_to_internal_format(uchar *record,
{
ulong *lengths;
Field **field;
- my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set);
+ MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->write_set);
DBUG_ENTER("ha_federated::convert_row_to_internal_format");
lengths= mysql_fetch_lengths(result);
@@ -965,7 +965,7 @@ uint ha_federated::convert_row_to_internal_format(uchar *record,
}
(*field)->move_field_offset(-old_ptr);
}
- dbug_tmp_restore_column_map(table->write_set, old_map);
+ dbug_tmp_restore_column_map(&table->write_set, old_map);
DBUG_RETURN(0);
}
@@ -1293,14 +1293,13 @@ bool ha_federated::create_where_from_key(String *to,
char tmpbuff[FEDERATED_QUERY_BUFFER_SIZE];
String tmp(tmpbuff, sizeof(tmpbuff), system_charset_info);
const key_range *ranges[2]= { start_key, end_key };
- my_bitmap_map *old_map;
DBUG_ENTER("ha_federated::create_where_from_key");
tmp.length(0);
if (start_key == NULL && end_key == NULL)
DBUG_RETURN(1);
- old_map= dbug_tmp_use_all_columns(table, table->write_set);
+ MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->write_set);
for (uint i= 0; i <= 1; i++)
{
bool needs_quotes;
@@ -1477,7 +1476,7 @@ prepare_for_next_key_part:
tmp.c_ptr_quick()));
}
}
- dbug_tmp_restore_column_map(table->write_set, old_map);
+ dbug_tmp_restore_column_map(&table->write_set, old_map);
if (both_not_null)
if (tmp.append(STRING_WITH_LEN(") ")))
@@ -1492,7 +1491,7 @@ prepare_for_next_key_part:
DBUG_RETURN(0);
err:
- dbug_tmp_restore_column_map(table->write_set, old_map);
+ dbug_tmp_restore_column_map(&table->write_set, old_map);
DBUG_RETURN(1);
}
@@ -1841,7 +1840,7 @@ int ha_federated::write_row(const uchar *buf)
String insert_field_value_string(insert_field_value_buffer,
sizeof(insert_field_value_buffer),
&my_charset_bin);
- my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
+ MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->read_set);
DBUG_ENTER("ha_federated::write_row");
values_string.length(0);
@@ -1895,7 +1894,7 @@ int ha_federated::write_row(const uchar *buf)
values_string.append(STRING_WITH_LEN(", "));
}
}
- dbug_tmp_restore_column_map(table->read_set, old_map);
+ dbug_tmp_restore_column_map(&table->read_set, old_map);
/*
if there were no fields, we don't want to add a closing paren
@@ -2203,7 +2202,7 @@ int ha_federated::update_row(const uchar *old_data, const uchar *new_data)
else
{
/* otherwise = */
- my_bitmap_map *old_map= tmp_use_all_columns(table, table->read_set);
+ MY_BITMAP *old_map= tmp_use_all_columns(table, &table->read_set);
bool needs_quote= (*field)->str_needs_quotes();
(*field)->val_str(&field_value);
if (needs_quote)
@@ -2212,7 +2211,7 @@ int ha_federated::update_row(const uchar *old_data, const uchar *new_data)
if (needs_quote)
update_string.append(value_quote_char);
field_value.length(0);
- tmp_restore_column_map(table->read_set, old_map);
+ tmp_restore_column_map(&table->read_set, old_map);
}
update_string.append(STRING_WITH_LEN(", "));
}
diff --git a/storage/federatedx/federatedx_io_mysql.cc b/storage/federatedx/federatedx_io_mysql.cc
index cc4d8ca7c70..f33cf45a241 100644
--- a/storage/federatedx/federatedx_io_mysql.cc
+++ b/storage/federatedx/federatedx_io_mysql.cc
@@ -64,7 +64,6 @@ struct mysql_position
class federatedx_io_mysql :public federatedx_io
{
MYSQL mysql; /* MySQL connection */
- MYSQL_ROWS *current;
DYNAMIC_ARRAY savepoints;
bool requested_autocommit;
bool actual_autocommit;
@@ -108,7 +107,8 @@ public:
virtual void free_result(FEDERATEDX_IO_RESULT *io_result);
virtual unsigned int get_num_fields(FEDERATEDX_IO_RESULT *io_result);
virtual my_ulonglong get_num_rows(FEDERATEDX_IO_RESULT *io_result);
- virtual FEDERATEDX_IO_ROW *fetch_row(FEDERATEDX_IO_RESULT *io_result);
+ virtual FEDERATEDX_IO_ROW *fetch_row(FEDERATEDX_IO_RESULT *io_result,
+ FEDERATEDX_IO_ROWS **current= NULL);
virtual ulong *fetch_lengths(FEDERATEDX_IO_RESULT *io_result);
virtual const char *get_column_data(FEDERATEDX_IO_ROW *row,
unsigned int column);
@@ -117,7 +117,7 @@ public:
virtual size_t get_ref_length() const;
virtual void mark_position(FEDERATEDX_IO_RESULT *io_result,
- void *ref);
+ void *ref, FEDERATEDX_IO_ROWS *current);
virtual int seek_position(FEDERATEDX_IO_RESULT **io_result,
const void *ref);
virtual void set_thd(void *thd);
@@ -517,10 +517,12 @@ my_ulonglong federatedx_io_mysql::get_num_rows(FEDERATEDX_IO_RESULT *io_result)
}
-FEDERATEDX_IO_ROW *federatedx_io_mysql::fetch_row(FEDERATEDX_IO_RESULT *io_result)
+FEDERATEDX_IO_ROW *federatedx_io_mysql::fetch_row(FEDERATEDX_IO_RESULT *io_result,
+ FEDERATEDX_IO_ROWS **current)
{
MYSQL_RES *result= (MYSQL_RES*)io_result;
- current= result->data_cursor;
+ if (current)
+ *current= (FEDERATEDX_IO_ROWS *) result->data_cursor;
return (FEDERATEDX_IO_ROW *) mysql_fetch_row(result);
}
@@ -628,11 +630,11 @@ size_t federatedx_io_mysql::get_ref_length() const
void federatedx_io_mysql::mark_position(FEDERATEDX_IO_RESULT *io_result,
- void *ref)
+ void *ref, FEDERATEDX_IO_ROWS *current)
{
mysql_position& pos= *reinterpret_cast<mysql_position*>(ref);
pos.result= (MYSQL_RES *) io_result;
- pos.offset= current;
+ pos.offset= (MYSQL_ROW_OFFSET) current;
}
int federatedx_io_mysql::seek_position(FEDERATEDX_IO_RESULT **io_result,
diff --git a/storage/federatedx/federatedx_io_null.cc b/storage/federatedx/federatedx_io_null.cc
index 1976f22124a..b1058dbd2f5 100644
--- a/storage/federatedx/federatedx_io_null.cc
+++ b/storage/federatedx/federatedx_io_null.cc
@@ -90,7 +90,8 @@ public:
virtual void free_result(FEDERATEDX_IO_RESULT *io_result);
virtual unsigned int get_num_fields(FEDERATEDX_IO_RESULT *io_result);
virtual my_ulonglong get_num_rows(FEDERATEDX_IO_RESULT *io_result);
- virtual FEDERATEDX_IO_ROW *fetch_row(FEDERATEDX_IO_RESULT *io_result);
+ virtual FEDERATEDX_IO_ROW *fetch_row(FEDERATEDX_IO_RESULT *io_result,
+ FEDERATEDX_IO_ROWS **current= NULL);
virtual ulong *fetch_lengths(FEDERATEDX_IO_RESULT *io_result);
virtual const char *get_column_data(FEDERATEDX_IO_ROW *row,
unsigned int column);
@@ -98,7 +99,7 @@ public:
unsigned int column) const;
virtual size_t get_ref_length() const;
virtual void mark_position(FEDERATEDX_IO_RESULT *io_result,
- void *ref);
+ void *ref, FEDERATEDX_IO_ROWS *current);
virtual int seek_position(FEDERATEDX_IO_RESULT **io_result,
const void *ref);
};
@@ -242,7 +243,8 @@ my_ulonglong federatedx_io_null::get_num_rows(FEDERATEDX_IO_RESULT *)
}
-FEDERATEDX_IO_ROW *federatedx_io_null::fetch_row(FEDERATEDX_IO_RESULT *)
+FEDERATEDX_IO_ROW *federatedx_io_null::fetch_row(FEDERATEDX_IO_RESULT *,
+ FEDERATEDX_IO_ROWS **current)
{
return NULL;
}
@@ -288,7 +290,7 @@ size_t federatedx_io_null::get_ref_length() const
void federatedx_io_null::mark_position(FEDERATEDX_IO_RESULT *io_result,
- void *ref)
+ void *ref, FEDERATEDX_IO_ROWS *current)
{
}
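Throughout the federatedx io classes, fetch_row() no longer records the current row in an io-object member; the handler passes a FEDERATEDX_IO_ROWS** out-parameter and later feeds that cursor back to mark_position(). A minimal sketch of moving such per-scan state from the io object to the caller (Row, Cursor and Result are invented stand-ins, not the FederatedX types):

  #include <cstddef>
  #include <vector>

  struct Row    { int id; };
  using  Cursor = const Row*;                 // caller-owned scan position

  struct Result { std::vector<Row> rows; size_t next = 0; };

  // Fetch the next row; optionally report where it came from through *current,
  // so the caller can remember the position without the io object holding it.
  static const Row *fetch_row(Result &res, Cursor *current = nullptr)
  {
    if (res.next >= res.rows.size())
      return nullptr;
    const Row *r = &res.rows[res.next++];
    if (current)
      *current = r;
    return r;
  }

  // mark_position() then just copies the caller-supplied cursor into the ref.
  static void mark_position(Cursor current, Cursor &ref) { ref = current; }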
diff --git a/storage/federatedx/ha_federatedx.cc b/storage/federatedx/ha_federatedx.cc
index 3c2b4cdc25b..19b56980714 100644
--- a/storage/federatedx/ha_federatedx.cc
+++ b/storage/federatedx/ha_federatedx.cc
@@ -871,7 +871,7 @@ uint ha_federatedx::convert_row_to_internal_format(uchar *record,
ulong *lengths;
Field **field;
int column= 0;
- my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set);
+ MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->write_set);
Time_zone *saved_time_zone= table->in_use->variables.time_zone;
DBUG_ENTER("ha_federatedx::convert_row_to_internal_format");
@@ -900,7 +900,7 @@ uint ha_federatedx::convert_row_to_internal_format(uchar *record,
(*field)->move_field_offset(-old_ptr);
}
table->in_use->variables.time_zone= saved_time_zone;
- dbug_tmp_restore_column_map(table->write_set, old_map);
+ dbug_tmp_restore_column_map(&table->write_set, old_map);
DBUG_RETURN(0);
}
@@ -1229,7 +1229,6 @@ bool ha_federatedx::create_where_from_key(String *to,
String tmp(tmpbuff, sizeof(tmpbuff), system_charset_info);
const key_range *ranges[2]= { start_key, end_key };
Time_zone *saved_time_zone= table->in_use->variables.time_zone;
- my_bitmap_map *old_map;
DBUG_ENTER("ha_federatedx::create_where_from_key");
tmp.length(0);
@@ -1237,7 +1236,7 @@ bool ha_federatedx::create_where_from_key(String *to,
DBUG_RETURN(1);
table->in_use->variables.time_zone= UTC;
- old_map= dbug_tmp_use_all_columns(table, table->write_set);
+ MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->write_set);
for (uint i= 0; i <= 1; i++)
{
bool needs_quotes;
@@ -1413,7 +1412,7 @@ prepare_for_next_key_part:
tmp.c_ptr_quick()));
}
}
- dbug_tmp_restore_column_map(table->write_set, old_map);
+ dbug_tmp_restore_column_map(&table->write_set, old_map);
table->in_use->variables.time_zone= saved_time_zone;
if (both_not_null)
@@ -1429,7 +1428,7 @@ prepare_for_next_key_part:
DBUG_RETURN(0);
err:
- dbug_tmp_restore_column_map(table->write_set, old_map);
+ dbug_tmp_restore_column_map(&table->write_set, old_map);
table->in_use->variables.time_zone= saved_time_zone;
DBUG_RETURN(1);
}
@@ -2004,7 +2003,7 @@ int ha_federatedx::write_row(const uchar *buf)
sizeof(insert_field_value_buffer),
&my_charset_bin);
Time_zone *saved_time_zone= table->in_use->variables.time_zone;
- my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
+ MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->read_set);
DBUG_ENTER("ha_federatedx::write_row");
table->in_use->variables.time_zone= UTC;
@@ -2059,7 +2058,7 @@ int ha_federatedx::write_row(const uchar *buf)
values_string.append(STRING_WITH_LEN(", "));
}
}
- dbug_tmp_restore_column_map(table->read_set, old_map);
+ dbug_tmp_restore_column_map(&table->read_set, old_map);
table->in_use->variables.time_zone= saved_time_zone;
/*
@@ -2384,7 +2383,7 @@ int ha_federatedx::update_row(const uchar *old_data, const uchar *new_data)
else
{
/* otherwise = */
- my_bitmap_map *old_map= tmp_use_all_columns(table, table->read_set);
+ MY_BITMAP *old_map= tmp_use_all_columns(table, &table->read_set);
bool needs_quote= (*field)->str_needs_quotes();
(*field)->val_str(&field_value);
if (needs_quote)
@@ -2393,7 +2392,7 @@ int ha_federatedx::update_row(const uchar *old_data, const uchar *new_data)
if (needs_quote)
update_string.append(value_quote_char);
field_value.length(0);
- tmp_restore_column_map(table->read_set, old_map);
+ tmp_restore_column_map(&table->read_set, old_map);
}
update_string.append(STRING_WITH_LEN(", "));
}
@@ -2942,7 +2941,7 @@ int ha_federatedx::read_next(uchar *buf, FEDERATEDX_IO_RESULT *result)
DBUG_RETURN(retval);
/* Fetch a row, insert it back in a row format. */
- if (!(row= io->fetch_row(result)))
+ if (!(row= io->fetch_row(result, &current)))
DBUG_RETURN(HA_ERR_END_OF_FILE);
if (!(retval= convert_row_to_internal_format(buf, row, result)))
@@ -2986,7 +2985,7 @@ void ha_federatedx::position(const uchar *record __attribute__ ((unused)))
if (txn->acquire(share, ha_thd(), TRUE, &io))
DBUG_VOID_RETURN;
- io->mark_position(stored_result, ref);
+ io->mark_position(stored_result, ref, current);
position_called= TRUE;
@@ -3420,7 +3419,9 @@ int ha_federatedx::create(const char *name, TABLE *table_arg,
{
FEDERATEDX_SERVER server;
- fill_server(thd->mem_root, &server, &tmp_share, create_info->table_charset);
+ // It's possibly wrong to use alter_table_convert_to_charset here.
+ fill_server(thd->mem_root, &server, &tmp_share,
+ create_info->alter_table_convert_to_charset);
#ifndef DBUG_OFF
mysql_mutex_init(fe_key_mutex_FEDERATEDX_SERVER_mutex,
diff --git a/storage/federatedx/ha_federatedx.h b/storage/federatedx/ha_federatedx.h
index 1870a83d13d..7b6504db93d 100644
--- a/storage/federatedx/ha_federatedx.h
+++ b/storage/federatedx/ha_federatedx.h
@@ -131,6 +131,7 @@ typedef struct st_federatedx_share {
typedef struct st_federatedx_result FEDERATEDX_IO_RESULT;
typedef struct st_federatedx_row FEDERATEDX_IO_ROW;
+typedef struct st_federatedx_rows FEDERATEDX_IO_ROWS;
typedef ptrdiff_t FEDERATEDX_IO_OFFSET;
class federatedx_io
@@ -207,7 +208,8 @@ public:
virtual void free_result(FEDERATEDX_IO_RESULT *io_result)=0;
virtual unsigned int get_num_fields(FEDERATEDX_IO_RESULT *io_result)=0;
virtual my_ulonglong get_num_rows(FEDERATEDX_IO_RESULT *io_result)=0;
- virtual FEDERATEDX_IO_ROW *fetch_row(FEDERATEDX_IO_RESULT *io_result)=0;
+ virtual FEDERATEDX_IO_ROW *fetch_row(FEDERATEDX_IO_RESULT *io_result,
+ FEDERATEDX_IO_ROWS **current= NULL)=0;
virtual ulong *fetch_lengths(FEDERATEDX_IO_RESULT *io_result)=0;
virtual const char *get_column_data(FEDERATEDX_IO_ROW *row,
unsigned int column)=0;
@@ -216,7 +218,7 @@ public:
virtual size_t get_ref_length() const=0;
virtual void mark_position(FEDERATEDX_IO_RESULT *io_result,
- void *ref)=0;
+ void *ref, FEDERATEDX_IO_ROWS *current)=0;
virtual int seek_position(FEDERATEDX_IO_RESULT **io_result,
const void *ref)=0;
virtual void set_thd(void *thd) { }
@@ -269,6 +271,7 @@ class ha_federatedx: public handler
federatedx_txn *txn;
federatedx_io *io;
FEDERATEDX_IO_RESULT *stored_result;
+ FEDERATEDX_IO_ROWS *current;
/**
Array of all stored results we get during a query execution.
*/
diff --git a/storage/heap/ha_heap.cc b/storage/heap/ha_heap.cc
index ff5bb4b3ec4..afbb929c372 100644
--- a/storage/heap/ha_heap.cc
+++ b/storage/heap/ha_heap.cc
@@ -363,9 +363,6 @@ int ha_heap::info(uint flag)
{
HEAPINFO hp_info;
- if (!table)
- return 0;
-
(void) heap_info(file,&hp_info,flag);
errkey= hp_info.errkey;
diff --git a/storage/innobase/.clang-format b/storage/innobase/.clang-format-old
index 54f7b47bc88..54f7b47bc88 100644
--- a/storage/innobase/.clang-format
+++ b/storage/innobase/.clang-format-old
diff --git a/storage/innobase/btr/btr0btr.cc b/storage/innobase/btr/btr0btr.cc
index 8ee7d167805..aca02ea3998 100644
--- a/storage/innobase/btr/btr0btr.cc
+++ b/storage/innobase/btr/btr0btr.cc
@@ -2,7 +2,7 @@
Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2012, Facebook Inc.
-Copyright (c) 2014, 2020, MariaDB Corporation.
+Copyright (c) 2014, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -898,7 +898,7 @@ btr_page_get_father_node_ptr_func(
node_ptr = btr_cur_get_rec(cursor);
- offsets = rec_get_offsets(node_ptr, index, offsets, false,
+ offsets = rec_get_offsets(node_ptr, index, offsets, 0,
ULINT_UNDEFINED, &heap);
if (btr_node_ptr_get_child_page_no(node_ptr, offsets) != page_no) {
@@ -915,10 +915,11 @@ btr_page_get_father_node_ptr_func(
print_rec = page_rec_get_next(
page_get_infimum_rec(page_align(user_rec)));
offsets = rec_get_offsets(print_rec, index, offsets,
- page_rec_is_leaf(user_rec),
+ page_rec_is_leaf(user_rec)
+ ? index->n_core_fields : 0,
ULINT_UNDEFINED, &heap);
page_rec_print(print_rec, offsets);
- offsets = rec_get_offsets(node_ptr, index, offsets, false,
+ offsets = rec_get_offsets(node_ptr, index, offsets, 0,
ULINT_UNDEFINED, &heap);
page_rec_print(node_ptr, offsets);
@@ -2284,7 +2285,9 @@ btr_page_get_split_rec(
incl_data += insert_size;
} else {
offsets = rec_get_offsets(rec, cursor->index, offsets,
- page_is_leaf(page),
+ page_is_leaf(page)
+ ? cursor->index->n_core_fields
+ : 0,
ULINT_UNDEFINED, &heap);
incl_data += rec_offs_size(offsets);
}
@@ -2393,7 +2396,9 @@ btr_page_insert_fits(
space after rec is removed from page. */
*offsets = rec_get_offsets(rec, cursor->index, *offsets,
- page_is_leaf(page),
+ page_is_leaf(page)
+ ? cursor->index->n_core_fields
+ : 0,
ULINT_UNDEFINED, heap);
total_data -= rec_offs_size(*offsets);
@@ -2680,7 +2685,8 @@ btr_page_tuple_smaller(
first_rec = page_cur_get_rec(&pcur);
*offsets = rec_get_offsets(
- first_rec, cursor->index, *offsets, page_is_leaf(block->frame),
+ first_rec, cursor->index, *offsets,
+ page_is_leaf(block->frame) ? cursor->index->n_core_fields : 0,
n_uniq, heap);
return(cmp_dtuple_rec(tuple, first_rec, *offsets) < 0);
@@ -2964,7 +2970,9 @@ func_start:
first_rec = move_limit = split_rec;
*offsets = rec_get_offsets(split_rec, cursor->index, *offsets,
- page_is_leaf(page), n_uniq, heap);
+ page_is_leaf(page)
+ ? cursor->index->n_core_fields : 0,
+ n_uniq, heap);
insert_left = !tuple
|| cmp_dtuple_rec(tuple, split_rec, *offsets) < 0;
@@ -3730,7 +3738,7 @@ retry:
rec_offs* offsets2 = NULL;
/* For rtree, we need to update father's mbr. */
- if (dict_index_is_spatial(index)) {
+ if (index->is_spatial()) {
/* We only support merge pages with the same parent
page */
if (!rtr_check_same_block(
@@ -3748,7 +3756,8 @@ retry:
offsets2 = rec_get_offsets(
btr_cur_get_rec(&cursor2), index, NULL,
- page_is_leaf(cursor2.page_cur.block->frame),
+ page_is_leaf(cursor2.page_cur.block->frame)
+ ? index->n_fields : 0,
ULINT_UNDEFINED, &heap);
/* Check if parent entry needs to be updated */
@@ -3922,13 +3931,14 @@ retry:
#endif /* UNIV_DEBUG */
/* For rtree, we need to update father's mbr. */
- if (dict_index_is_spatial(index)) {
+ if (index->is_spatial()) {
rec_offs* offsets2;
ulint rec_info;
offsets2 = rec_get_offsets(
btr_cur_get_rec(&cursor2), index, NULL,
- page_is_leaf(cursor2.page_cur.block->frame),
+ page_is_leaf(cursor2.page_cur.block->frame)
+ ? index->n_fields : 0,
ULINT_UNDEFINED, &heap);
ut_ad(btr_node_ptr_get_child_page_no(
@@ -4094,12 +4104,13 @@ btr_discard_only_page_on_level(
mtr_t* mtr) /*!< in: mtr */
{
ulint page_level = 0;
- trx_id_t max_trx_id;
ut_ad(!index->is_dummy);
/* Save the PAGE_MAX_TRX_ID from the leaf page. */
- max_trx_id = page_get_max_trx_id(buf_block_get_frame(block));
+ const trx_id_t max_trx_id = page_get_max_trx_id(block->frame);
+ const rec_t* r = page_rec_get_next(page_get_infimum_rec(block->frame));
+ ut_ad(rec_is_metadata(r, *index) == index->is_instant());
while (block->page.id.page_no() != dict_index_get_page(index)) {
btr_cur_t cursor;
@@ -4150,16 +4161,14 @@ btr_discard_only_page_on_level(
}
#endif /* UNIV_BTR_DEBUG */
- mem_heap_t* heap = NULL;
- const rec_t* rec = NULL;
- rec_offs* offsets = NULL;
+ mem_heap_t* heap = nullptr;
+ const rec_t* rec = nullptr;
+ rec_offs* offsets = nullptr;
if (index->table->instant) {
- const rec_t* r = page_rec_get_next(page_get_infimum_rec(
- block->frame));
- ut_ad(rec_is_metadata(r, *index) == index->is_instant());
if (rec_is_alter_metadata(r, *index)) {
heap = mem_heap_create(srv_page_size);
- offsets = rec_get_offsets(r, index, NULL, true,
+ offsets = rec_get_offsets(r, index, nullptr,
+ index->n_core_fields,
ULINT_UNDEFINED, &heap);
rec = rec_copy(mem_heap_alloc(heap,
rec_offs_size(offsets)),
@@ -4433,7 +4442,7 @@ btr_print_recursive(
node_ptr = page_cur_get_rec(&cursor);
*offsets = rec_get_offsets(
- node_ptr, index, *offsets, false,
+ node_ptr, index, *offsets, 0,
ULINT_UNDEFINED, heap);
btr_print_recursive(index,
btr_node_ptr_get_child(node_ptr,
@@ -4582,7 +4591,9 @@ btr_index_rec_validate(
page = page_align(rec);
- if (dict_index_is_ibuf(index)) {
+ ut_ad(index->n_core_fields);
+
+ if (index->is_ibuf()) {
/* The insert buffer index tree can contain records from any
other index: we cannot check the number of fields or
their length */
@@ -4646,7 +4657,8 @@ n_field_mismatch:
}
}
- offsets = rec_get_offsets(rec, index, offsets, page_is_leaf(page),
+ offsets = rec_get_offsets(rec, index, offsets, page_is_leaf(page)
+ ? index->n_core_fields : 0,
ULINT_UNDEFINED, &heap);
const dict_field_t* field = index->fields;
ut_ad(rec_offs_n_fields(offsets)
@@ -4668,6 +4680,16 @@ n_field_mismatch:
} else {
fixed_size = dict_col_get_fixed_size(
field->col, page_is_comp(page));
+ if (rec_offs_nth_extern(offsets, i)) {
+ const byte* data = rec_get_nth_field(
+ rec, offsets, i, &len);
+ len -= BTR_EXTERN_FIELD_REF_SIZE;
+ ulint extern_len = mach_read_from_4(
+ data + len + BTR_EXTERN_LEN + 4);
+ if (fixed_size == extern_len) {
+ goto next_field;
+ }
+ }
}
/* Note that if fixed_size != 0, it equals the
@@ -4700,7 +4722,7 @@ len_mismatch:
}
return(FALSE);
}
-
+next_field:
field++;
}
@@ -4893,7 +4915,7 @@ btr_validate_level(
page_cur_move_to_next(&cursor);
node_ptr = page_cur_get_rec(&cursor);
- offsets = rec_get_offsets(node_ptr, index, offsets, false,
+ offsets = rec_get_offsets(node_ptr, index, offsets, 0,
ULINT_UNDEFINED, &heap);
savepoint2 = mtr_set_savepoint(&mtr);
@@ -5017,10 +5039,12 @@ loop:
right_rec = page_rec_get_next(page_get_infimum_rec(
right_page));
offsets = rec_get_offsets(rec, index, offsets,
- page_is_leaf(page),
+ page_is_leaf(page)
+ ? index->n_core_fields : 0,
ULINT_UNDEFINED, &heap);
offsets2 = rec_get_offsets(right_rec, index, offsets2,
- page_is_leaf(right_page),
+ page_is_leaf(right_page)
+ ? index->n_core_fields : 0,
ULINT_UNDEFINED, &heap);
/* For spatial index, we cannot guarantee the key ordering
diff --git a/storage/innobase/btr/btr0bulk.cc b/storage/innobase/btr/btr0bulk.cc
index 51c91c5b037..65cb6e83783 100644
--- a/storage/innobase/btr/btr0bulk.cc
+++ b/storage/innobase/btr/btr0bulk.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 2014, 2019, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, 2020, MariaDB Corporation.
+Copyright (c) 2017, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -193,7 +193,8 @@ PageBulk::insert(
if (!page_rec_is_infimum_low(page_offset(m_cur_rec))) {
rec_t* old_rec = m_cur_rec;
rec_offs* old_offsets = rec_get_offsets(
- old_rec, m_index, NULL, is_leaf,
+ old_rec, m_index, NULL, is_leaf
+ ? m_index->n_core_fields : 0,
ULINT_UNDEFINED, &m_heap);
ut_ad(cmp_rec_rec(rec, old_rec, offsets, old_offsets, m_index)
@@ -447,6 +448,7 @@ PageBulk::getSplitRec()
ut_ad(m_page_zip != NULL);
ut_ad(m_rec_no >= 2);
+ ut_ad(!m_index->is_instant());
ut_ad(page_get_free_space_of_empty(m_is_comp) > m_free_space);
total_used_size = page_get_free_space_of_empty(m_is_comp)
@@ -456,13 +458,13 @@ PageBulk::getSplitRec()
n_recs = 0;
offsets = NULL;
rec = page_get_infimum_rec(m_page);
+ const ulint n_core = page_is_leaf(m_page) ? m_index->n_core_fields : 0;
do {
rec = page_rec_get_next(rec);
ut_ad(page_rec_is_user_rec(rec));
- offsets = rec_get_offsets(rec, m_index, offsets,
- page_is_leaf(m_page),
+ offsets = rec_get_offsets(rec, m_index, offsets, n_core,
ULINT_UNDEFINED, &m_heap);
total_recs_size += rec_offs_size(offsets);
n_recs++;
@@ -491,9 +493,11 @@ PageBulk::copyIn(
ut_ad(m_rec_no == 0);
ut_ad(page_rec_is_user_rec(rec));
+ const ulint n_core = page_rec_is_leaf(rec)
+ ? m_index->n_core_fields : 0;
+
do {
- offsets = rec_get_offsets(rec, m_index, offsets,
- page_rec_is_leaf(split_rec),
+ offsets = rec_get_offsets(rec, m_index, offsets, n_core,
ULINT_UNDEFINED, &m_heap);
insert(rec, offsets);
@@ -534,8 +538,10 @@ PageBulk::copyOut(
/* Set last record's next in page */
rec_offs* offsets = NULL;
rec = page_rec_get_prev(split_rec);
- offsets = rec_get_offsets(rec, m_index, offsets,
- page_rec_is_leaf(split_rec),
+ const ulint n_core = page_rec_is_leaf(split_rec)
+ ? m_index->n_core_fields : 0;
+
+ offsets = rec_get_offsets(rec, m_index, offsets, n_core,
ULINT_UNDEFINED, &m_heap);
page_rec_set_next(rec, page_get_supremum_rec(m_page));
@@ -543,8 +549,7 @@ PageBulk::copyOut(
m_cur_rec = rec;
m_heap_top = rec_get_end(rec, offsets);
- offsets = rec_get_offsets(last_rec, m_index, offsets,
- page_rec_is_leaf(split_rec),
+ offsets = rec_get_offsets(last_rec, m_index, offsets, n_core,
ULINT_UNDEFINED, &m_heap);
m_free_space += ulint(rec_get_end(last_rec, offsets) - m_heap_top)
@@ -976,7 +981,8 @@ BtrBulk::insert(
/* Convert tuple to rec. */
rec = rec_convert_dtuple_to_rec(static_cast<byte*>(mem_heap_alloc(
page_bulk->m_heap, rec_size)), m_index, tuple, n_ext);
- offsets = rec_get_offsets(rec, m_index, offsets, !level,
+ offsets = rec_get_offsets(rec, m_index, offsets, level
+ ? 0 : m_index->n_core_fields,
ULINT_UNDEFINED, &page_bulk->m_heap);
page_bulk->insert(rec, offsets);
diff --git a/storage/innobase/btr/btr0cur.cc b/storage/innobase/btr/btr0cur.cc
index 59c6d06d5af..aeb5e2aaa9c 100644
--- a/storage/innobase/btr/btr0cur.cc
+++ b/storage/innobase/btr/btr0cur.cc
@@ -3,7 +3,7 @@
Copyright (c) 1994, 2019, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2008, Google Inc.
Copyright (c) 2012, Facebook Inc.
-Copyright (c) 2015, 2020, MariaDB Corporation.
+Copyright (c) 2015, 2021, MariaDB Corporation.
Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
@@ -67,6 +67,9 @@ Created 10/16/1994 Heikki Tuuri
#include "srv0start.h"
#include "mysql_com.h"
#include "dict0stats.h"
+#ifdef WITH_WSREP
+#include "mysql/service_wsrep.h"
+#endif /* WITH_WSREP */
/** Buffered B-tree operation types, introduced as part of delete buffering. */
enum btr_op_t {
@@ -592,7 +595,8 @@ incompatible:
}
mem_heap_t* heap = NULL;
- rec_offs* offsets = rec_get_offsets(rec, index, NULL, true,
+ rec_offs* offsets = rec_get_offsets(rec, index, NULL,
+ index->n_core_fields,
ULINT_UNDEFINED, &heap);
if (rec_offs_any_default(offsets)) {
inconsistent:
@@ -1189,7 +1193,6 @@ static ulint btr_node_ptr_max_size(const dict_index_t* index)
TABLE_STATS_NAME)
|| !strcmp(index->table->name.m_name,
INDEX_STATS_NAME))) {
- ut_ad(!strcmp(field->name, "table_name"));
/* Interpret "table_name" as VARCHAR(199) even
if it was incorrectly defined as VARCHAR(64).
While the caller of ha_innobase enforces the
@@ -2047,7 +2050,7 @@ retry_page_get:
node_ptr = page_cur_get_rec(page_cursor);
- offsets = rec_get_offsets(node_ptr, index, offsets, false,
+ offsets = rec_get_offsets(node_ptr, index, offsets, 0,
ULINT_UNDEFINED, &heap);
/* If the rec is the first or last in the page for
@@ -2178,7 +2181,7 @@ need_opposite_intention:
offsets2 = rec_get_offsets(
first_rec, index, offsets2,
- false, ULINT_UNDEFINED, &heap);
+ 0, ULINT_UNDEFINED, &heap);
cmp_rec_rec(node_ptr, first_rec,
offsets, offsets2, index, false,
&matched_fields);
@@ -2196,7 +2199,7 @@ need_opposite_intention:
offsets2 = rec_get_offsets(
last_rec, index, offsets2,
- false, ULINT_UNDEFINED, &heap);
+ 0, ULINT_UNDEFINED, &heap);
cmp_rec_rec(
node_ptr, last_rec,
offsets, offsets2, index,
@@ -2365,7 +2368,7 @@ need_opposite_intention:
offsets = rec_get_offsets(
my_node_ptr, index, offsets,
- false, ULINT_UNDEFINED, &heap);
+ 0, ULINT_UNDEFINED, &heap);
ulint my_page_no
= btr_node_ptr_get_child_page_no(
@@ -2818,7 +2821,7 @@ btr_cur_open_at_index_side_func(
node_ptr = page_cur_get_rec(page_cursor);
offsets = rec_get_offsets(node_ptr, cursor->index, offsets,
- false, ULINT_UNDEFINED, &heap);
+ 0, ULINT_UNDEFINED, &heap);
/* If the rec is the first or last in the page for
pessimistic delete intention, it might cause node_ptr insert
@@ -3113,7 +3116,7 @@ btr_cur_open_at_rnd_pos_func(
node_ptr = page_cur_get_rec(page_cursor);
offsets = rec_get_offsets(node_ptr, cursor->index, offsets,
- false, ULINT_UNDEFINED, &heap);
+ 0, ULINT_UNDEFINED, &heap);
/* If the rec is the first or last in the page for
pessimistic delete intention, it might cause node_ptr insert
@@ -3288,7 +3291,8 @@ btr_cur_ins_lock_and_undo(
/* Check if there is predicate or GAP lock preventing the insertion */
if (!(flags & BTR_NO_LOCKING_FLAG)) {
- if (dict_index_is_spatial(index)) {
+ const unsigned type = index->type;
+ if (UNIV_UNLIKELY(type & DICT_SPATIAL)) {
lock_prdt_t prdt;
rtr_mbr_t mbr;
@@ -3305,9 +3309,30 @@ btr_cur_ins_lock_and_undo(
index, thr, mtr, &prdt);
*inherit = false;
} else {
+#ifdef WITH_WSREP
+ trx_t* trx= thr_get_trx(thr);
+			/* If the transaction scanning a unique secondary
+			key runs in a wsrep high priority (brute force)
+			thread, the scan may involve GAP-locking in the
+			index. Because such locking also happens when
+			replication events are applied in high priority
+			applier threads, lock conflicts between two wsrep
+			high priority threads are possible. To avoid this
+			GAP-locking, mark here that the transaction is
+			performing a unique key scan. */
+ if ((type & (DICT_CLUSTERED | DICT_UNIQUE)) == DICT_UNIQUE
+ && trx->is_wsrep()
+ && wsrep_thd_is_BF(trx->mysql_thd, false)) {
+ trx->wsrep_UK_scan= true;
+ }
+#endif /* WITH_WSREP */
err = lock_rec_insert_check_and_lock(
flags, rec, btr_cur_get_block(cursor),
index, thr, mtr, inherit);
+#ifdef WITH_WSREP
+ trx->wsrep_UK_scan= false;
+#endif /* WITH_WSREP */
}
}
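/* Editorial sketch, not part of the patch: wsrep_UK_scan is meant to be
   true only for the duration of the single
   lock_rec_insert_check_and_lock() call on a unique secondary index in
   a BF (high priority) thread.  The same set/clear pattern could be
   written as a scope guard so the flag is reset on every exit path; the
   guard type below is purely illustrative and does not exist in the
   tree. */
#ifdef WITH_WSREP
struct wsrep_uk_scan_guard {
	trx_t	*m_trx;
	wsrep_uk_scan_guard(trx_t *trx, bool enable)
		: m_trx(enable ? trx : NULL)
	{ if (m_trx) m_trx->wsrep_UK_scan= true; }
	~wsrep_uk_scan_guard()
	{ if (m_trx) m_trx->wsrep_UK_scan= false; }
};
#endif /* WITH_WSREP */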
@@ -3573,7 +3598,8 @@ fail_err:
ut_ad(thr->graph->trx->id
== trx_read_trx_id(
static_cast<const byte*>(
- trx_id->data)));
+ trx_id->data))
+ || index->table->is_temporary());
}
}
#endif
@@ -4093,7 +4119,8 @@ btr_cur_parse_update_in_place(
flags != (BTR_NO_UNDO_LOG_FLAG
| BTR_NO_LOCKING_FLAG
| BTR_KEEP_SYS_FLAG)
- || page_is_leaf(page),
+ || page_is_leaf(page)
+ ? index->n_core_fields : 0,
ULINT_UNDEFINED, &heap);
if (!(flags & BTR_KEEP_SYS_FLAG)) {
@@ -4234,7 +4261,8 @@ btr_cur_update_in_place(
index = cursor->index;
ut_ad(rec_offs_validate(rec, index, offsets));
ut_ad(!!page_rec_is_comp(rec) == dict_table_is_comp(index->table));
- ut_ad(trx_id > 0 || (flags & BTR_KEEP_SYS_FLAG));
+ ut_ad(trx_id > 0 || (flags & BTR_KEEP_SYS_FLAG)
+ || index->table->is_temporary());
/* The insert buffer tree should never be updated in place. */
ut_ad(!dict_index_is_ibuf(index));
ut_ad(dict_index_is_online_ddl(index) == !!(flags & BTR_CREATE_FLAG)
@@ -4535,7 +4563,8 @@ btr_cur_optimistic_update(
page = buf_block_get_frame(block);
rec = btr_cur_get_rec(cursor);
index = cursor->index;
- ut_ad(trx_id > 0 || (flags & BTR_KEEP_SYS_FLAG));
+ ut_ad(trx_id > 0 || (flags & BTR_KEEP_SYS_FLAG)
+ || index->table->is_temporary());
ut_ad(!!page_rec_is_comp(rec) == dict_table_is_comp(index->table));
ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX));
/* This is intended only for leaf page updates */
@@ -4551,7 +4580,7 @@ btr_cur_optimistic_update(
ut_ad(fil_page_index_page_check(page));
ut_ad(btr_page_get_index_id(page) == index->id);
- *offsets = rec_get_offsets(rec, index, *offsets, true,
+ *offsets = rec_get_offsets(rec, index, *offsets, index->n_core_fields,
ULINT_UNDEFINED, heap);
#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG
ut_a(!rec_offs_any_null_extern(rec, *offsets)
@@ -4892,8 +4921,8 @@ btr_cur_pessimistic_update(
ut_ad(!page_zip || !index->table->is_temporary());
/* The insert buffer tree should never be updated in place. */
ut_ad(!dict_index_is_ibuf(index));
- ut_ad(trx_id > 0
- || (flags & BTR_KEEP_SYS_FLAG));
+ ut_ad(trx_id > 0 || (flags & BTR_KEEP_SYS_FLAG)
+ || index->table->is_temporary());
ut_ad(dict_index_is_online_ddl(index) == !!(flags & BTR_CREATE_FLAG)
|| dict_index_is_clust(index));
ut_ad(thr_get_trx(thr)->id == trx_id
@@ -5409,7 +5438,8 @@ btr_cur_parse_del_mark_set_clust_rec(
if (!(flags & BTR_KEEP_SYS_FLAG)) {
row_upd_rec_sys_fields_in_recovery(
rec, page_zip,
- rec_get_offsets(rec, index, offsets, true,
+ rec_get_offsets(rec, index, offsets,
+ index->n_core_fields,
pos + 2, &heap),
pos, trx_id, roll_ptr);
} else {
@@ -5418,7 +5448,8 @@ btr_cur_parse_del_mark_set_clust_rec(
ut_ad(memcmp(rec_get_nth_field(
rec,
rec_get_offsets(rec, index,
- offsets, true,
+ offsets, index
+ ->n_core_fields,
pos, &heap),
pos, &offset),
field_ref_zero, DATA_TRX_ID_LEN));
@@ -5753,7 +5784,8 @@ btr_cur_optimistic_delete_func(
rec = btr_cur_get_rec(cursor);
- offsets = rec_get_offsets(rec, cursor->index, offsets, true,
+ offsets = rec_get_offsets(rec, cursor->index, offsets,
+ cursor->index->n_core_fields,
ULINT_UNDEFINED, &heap);
const ibool no_compress_needed = !rec_offs_any_extern(offsets)
@@ -5961,7 +5993,8 @@ btr_cur_pessimistic_delete(
ut_a(!page_zip || page_zip_validate(page_zip, page, index));
#endif /* UNIV_ZIP_DEBUG */
- offsets = rec_get_offsets(rec, index, NULL, page_is_leaf(page),
+ offsets = rec_get_offsets(rec, index, NULL, page_is_leaf(page)
+ ? index->n_core_fields : 0,
ULINT_UNDEFINED, &heap);
if (rec_offs_any_extern(offsets)) {
@@ -6061,7 +6094,7 @@ discard_page:
pointer as the predefined minimum record */
min_mark_next_rec = true;
- } else if (dict_index_is_spatial(index)) {
+ } else if (index->is_spatial()) {
/* For rtree, if delete the leftmost node pointer,
we need to update parent page. */
rtr_mbr_t father_mbr;
@@ -6076,7 +6109,7 @@ discard_page:
&father_cursor);
offsets = rec_get_offsets(
btr_cur_get_rec(&father_cursor), index, NULL,
- false, ULINT_UNDEFINED, &heap);
+ 0, ULINT_UNDEFINED, &heap);
father_rec = btr_cur_get_rec(&father_cursor);
rtr_read_mbr(rec_get_nth_field(
@@ -6998,12 +7031,13 @@ btr_estimate_number_of_different_key_vals(dict_index_t* index)
page = btr_cur_get_page(&cursor);
rec = page_rec_get_next(page_get_infimum_rec(page));
- const bool is_leaf = page_is_leaf(page);
+ const ulint n_core = page_is_leaf(page)
+ ? index->n_core_fields : 0;
if (!page_rec_is_supremum(rec)) {
not_empty_flag = 1;
offsets_rec = rec_get_offsets(rec, index, offsets_rec,
- is_leaf,
+ n_core,
ULINT_UNDEFINED, &heap);
if (n_not_null != NULL) {
@@ -7024,7 +7058,7 @@ btr_estimate_number_of_different_key_vals(dict_index_t* index)
offsets_next_rec = rec_get_offsets(next_rec, index,
offsets_next_rec,
- is_leaf,
+ n_core,
ULINT_UNDEFINED,
&heap);
diff --git a/storage/innobase/btr/btr0defragment.cc b/storage/innobase/btr/btr0defragment.cc
index a68d6fa771d..b22d9f8323d 100644
--- a/storage/innobase/btr/btr0defragment.cc
+++ b/storage/innobase/btr/btr0defragment.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (C) 2012, 2014 Facebook, Inc. All Rights Reserved.
-Copyright (C) 2014, 2019, MariaDB Corporation.
+Copyright (C) 2014, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -340,12 +340,12 @@ btr_defragment_calc_n_recs_for_size(
ulint size = 0;
page_cur_t cur;
+ const ulint n_core = page_is_leaf(page) ? index->n_core_fields : 0;
page_cur_set_before_first(block, &cur);
page_cur_move_to_next(&cur);
while (page_cur_get_rec(&cur) != page_get_supremum_rec(page)) {
rec_t* cur_rec = page_cur_get_rec(&cur);
- offsets = rec_get_offsets(cur_rec, index, offsets,
- page_is_leaf(page),
+ offsets = rec_get_offsets(cur_rec, index, offsets, n_core,
ULINT_UNDEFINED, &heap);
ulint rec_size = rec_offs_size(offsets);
size += rec_size;
@@ -357,6 +357,9 @@ btr_defragment_calc_n_recs_for_size(
page_cur_move_to_next(&cur);
}
*n_recs_size = size;
+ if (UNIV_LIKELY_NULL(heap)) {
+ mem_heap_free(heap);
+ }
return n_recs;
}
diff --git a/storage/innobase/btr/btr0pcur.cc b/storage/innobase/btr/btr0pcur.cc
index 9c5216dc015..2c3f06da111 100644
--- a/storage/innobase/btr/btr0pcur.cc
+++ b/storage/innobase/btr/btr0pcur.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2016, 2020, MariaDB Corporation.
+Copyright (c) 2016, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -61,6 +61,7 @@ btr_pcur_reset(
cursor->btr_cur.index = NULL;
cursor->btr_cur.page_cur.rec = NULL;
cursor->old_rec = NULL;
+ cursor->old_n_core_fields = 0;
cursor->old_n_fields = 0;
cursor->old_stored = false;
@@ -151,7 +152,8 @@ before_first:
ut_ad(!page_rec_is_infimum(rec));
if (UNIV_UNLIKELY(rec_is_metadata(rec, *index))) {
- ut_ad(index->table->instant);
+ ut_ad(index->table->instant
+ || block->page.id.page_no() != index->page);
ut_ad(page_get_n_recs(block->frame) == 1);
ut_ad(page_is_leaf(block->frame));
ut_ad(!page_has_prev(block->frame));
@@ -165,11 +167,8 @@ before_first:
if (rec_is_metadata(rec, *index)) {
ut_ad(!page_has_prev(block->frame));
- ut_d(const rec_t* p = rec);
rec = page_rec_get_next(rec);
if (page_rec_is_supremum(rec)) {
- ut_ad(page_has_next(block->frame)
- || rec_is_alter_metadata(p, *index));
goto before_first;
}
}
@@ -181,19 +180,21 @@ before_first:
if (index->is_ibuf()) {
ut_ad(!index->table->not_redundant());
- cursor->old_n_fields = rec_get_n_fields_old(rec);
- } else if (page_rec_is_leaf(rec)) {
- cursor->old_n_fields = dict_index_get_n_unique_in_tree(index);
- } else if (index->is_spatial()) {
- ut_ad(dict_index_get_n_unique_in_tree_nonleaf(index)
- == DICT_INDEX_SPATIAL_NODEPTR_SIZE);
- /* For R-tree, we have to compare
- the child page numbers as well. */
- cursor->old_n_fields = DICT_INDEX_SPATIAL_NODEPTR_SIZE + 1;
+ cursor->old_n_fields = uint16_t(rec_get_n_fields_old(rec));
} else {
- cursor->old_n_fields = dict_index_get_n_unique_in_tree(index);
+ cursor->old_n_fields = static_cast<uint16>(
+ dict_index_get_n_unique_in_tree(index));
+ if (index->is_spatial() && !page_rec_is_leaf(rec)) {
+ ut_ad(dict_index_get_n_unique_in_tree_nonleaf(index)
+ == DICT_INDEX_SPATIAL_NODEPTR_SIZE);
+ /* For R-tree, we have to compare
+ the child page numbers as well. */
+ cursor->old_n_fields
+ = DICT_INDEX_SPATIAL_NODEPTR_SIZE + 1;
+ }
}
+ cursor->old_n_core_fields = index->n_core_fields;
cursor->old_rec = rec_copy_prefix_to_buf(rec, index,
cursor->old_n_fields,
&cursor->old_rec_buf,
@@ -228,6 +229,7 @@ btr_pcur_copy_stored_position(
+ (pcur_donate->old_rec - pcur_donate->old_rec_buf);
}
+ pcur_receive->old_n_core_fields = pcur_donate->old_n_core_fields;
pcur_receive->old_n_fields = pcur_donate->old_n_fields;
}
@@ -319,6 +321,8 @@ btr_pcur_restore_position_func(
}
ut_a(cursor->old_rec);
+ ut_a(cursor->old_n_core_fields);
+ ut_a(cursor->old_n_core_fields <= index->n_core_fields);
ut_a(cursor->old_n_fields);
switch (latch_mode) {
@@ -352,11 +356,16 @@ btr_pcur_restore_position_func(
rec_offs_init(offsets2_);
heap = mem_heap_create(256);
+ ut_ad(cursor->old_n_core_fields
+ == index->n_core_fields);
+
offsets1 = rec_get_offsets(
- cursor->old_rec, index, offsets1, true,
+ cursor->old_rec, index, offsets1,
+ cursor->old_n_core_fields,
cursor->old_n_fields, &heap);
offsets2 = rec_get_offsets(
- rec, index, offsets2, true,
+ rec, index, offsets2,
+ index->n_core_fields,
cursor->old_n_fields, &heap);
ut_ad(!cmp_rec_rec(cursor->old_rec,
@@ -381,8 +390,14 @@ btr_pcur_restore_position_func(
heap = mem_heap_create(256);
- tuple = dict_index_build_data_tuple(cursor->old_rec, index, true,
- cursor->old_n_fields, heap);
+ tuple = dtuple_create(heap, cursor->old_n_fields);
+
+ dict_index_copy_types(tuple, index, cursor->old_n_fields);
+
+ rec_copy_prefix_to_dtuple(tuple, cursor->old_rec, index,
+ cursor->old_n_core_fields,
+ cursor->old_n_fields, heap);
+ ut_ad(dtuple_check_typed(tuple));
/* Save the old search mode of the cursor */
old_mode = cursor->search_mode;
@@ -421,7 +436,8 @@ btr_pcur_restore_position_func(
&& btr_pcur_is_on_user_rec(cursor)
&& !cmp_dtuple_rec(tuple, btr_pcur_get_rec(cursor),
rec_get_offsets(btr_pcur_get_rec(cursor),
- index, offsets, true,
+ index, offsets,
+ index->n_core_fields,
ULINT_UNDEFINED, &heap))) {
/* We have to store the NEW value for the modify clock,
diff --git a/storage/innobase/btr/btr0sea.cc b/storage/innobase/btr/btr0sea.cc
index 6a1163e5cf9..2eae4cf503f 100644
--- a/storage/innobase/btr/btr0sea.cc
+++ b/storage/innobase/btr/btr0sea.cc
@@ -2,7 +2,7 @@
Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2008, Google Inc.
-Copyright (c) 2017, 2020, MariaDB Corporation.
+Copyright (c) 2017, 2021, MariaDB Corporation.
Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
@@ -666,6 +666,12 @@ btr_search_update_hash_ref(
return;
}
+ if (cursor->index != index) {
+ ut_ad(cursor->index->id == index->id);
+ btr_search_drop_page_hash_index(block);
+ return;
+ }
+
ut_ad(block->page.id.space() == index->table->space_id);
ut_ad(index == cursor->index);
ut_ad(!dict_index_is_ibuf(index));
@@ -690,7 +696,8 @@ btr_search_update_hash_ref(
ulint fold = rec_fold(
rec,
- rec_get_offsets(rec, index, offsets_, true,
+ rec_get_offsets(rec, index, offsets_,
+ index->n_core_fields,
ULINT_UNDEFINED, &heap),
block->curr_n_fields,
block->curr_n_bytes, index->id);
@@ -749,7 +756,8 @@ btr_search_check_guess(
match = 0;
- offsets = rec_get_offsets(rec, cursor->index, offsets, true,
+ offsets = rec_get_offsets(rec, cursor->index, offsets,
+ cursor->index->n_core_fields,
n_unique, &heap);
cmp = cmp_dtuple_rec_with_match(tuple, rec, offsets, &match);
@@ -800,7 +808,8 @@ btr_search_check_guess(
}
offsets = rec_get_offsets(prev_rec, cursor->index, offsets,
- true, n_unique, &heap);
+ cursor->index->n_core_fields,
+ n_unique, &heap);
cmp = cmp_dtuple_rec_with_match(
tuple, prev_rec, offsets, &match);
if (mode == PAGE_CUR_GE) {
@@ -823,7 +832,8 @@ btr_search_check_guess(
}
offsets = rec_get_offsets(next_rec, cursor->index, offsets,
- true, n_unique, &heap);
+ cursor->index->n_core_fields,
+ n_unique, &heap);
cmp = cmp_dtuple_rec_with_match(
tuple, next_rec, offsets, &match);
if (mode == PAGE_CUR_LE) {
@@ -1125,15 +1135,26 @@ retry:
% btr_ahi_parts;
latch = btr_search_latches[ahi_slot];
- rw_lock_s_lock(latch);
+ dict_index_t* index = block->index;
+
+ bool is_freed = index && index->freed();
+ if (is_freed) {
+ rw_lock_x_lock(latch);
+ } else {
+ rw_lock_s_lock(latch);
+ }
+
assert_block_ahi_valid(block);
- if (!block->index || !btr_search_enabled) {
- rw_lock_s_unlock(latch);
+ if (!index || !btr_search_enabled) {
+ if (is_freed) {
+ rw_lock_x_unlock(latch);
+ } else {
+ rw_lock_s_unlock(latch);
+ }
return;
}
- dict_index_t* index = block->index;
#ifdef MYSQL_INDEX_DISABLE_AHI
ut_ad(!index->disable_ahi);
#endif
@@ -1149,7 +1170,9 @@ retry:
/* NOTE: The AHI fields of block must not be accessed after
releasing search latch, as the index page might only be s-latched! */
- rw_lock_s_unlock(latch);
+ if (!is_freed) {
+ rw_lock_s_unlock(latch);
+ }
ut_a(n_fields > 0 || n_bytes > 0);
@@ -1176,7 +1199,7 @@ retry:
while (!page_rec_is_supremum(rec)) {
offsets = rec_get_offsets(
- rec, index, offsets, true,
+ rec, index, offsets, index->n_core_fields,
btr_search_get_n_fields(n_fields, n_bytes),
&heap);
fold = rec_fold(rec, offsets, n_fields, n_bytes, index_id);
@@ -1200,16 +1223,18 @@ next_rec:
mem_heap_free(heap);
}
- rw_lock_x_lock(latch);
+ if (!is_freed) {
+ rw_lock_x_lock(latch);
- if (UNIV_UNLIKELY(!block->index)) {
- /* Someone else has meanwhile dropped the hash index */
+ if (UNIV_UNLIKELY(!block->index)) {
+ /* Someone else has meanwhile dropped the
+ hash index */
+ goto cleanup;
+ }
- goto cleanup;
+ ut_a(block->index == index);
}
- ut_a(block->index == index);
-
if (block->curr_n_fields != n_fields
|| block->curr_n_bytes != n_bytes) {
@@ -1400,7 +1425,7 @@ btr_search_build_page_hash_index(
ut_a(index->id == btr_page_get_index_id(page));
offsets = rec_get_offsets(
- rec, index, offsets, true,
+ rec, index, offsets, index->n_core_fields,
btr_search_get_n_fields(n_fields, n_bytes),
&heap);
ut_ad(page_rec_is_supremum(rec)
@@ -1431,7 +1456,7 @@ btr_search_build_page_hash_index(
}
offsets = rec_get_offsets(
- next_rec, index, offsets, true,
+ next_rec, index, offsets, index->n_core_fields,
btr_search_get_n_fields(n_fields, n_bytes), &heap);
next_fold = rec_fold(next_rec, offsets, n_fields,
n_bytes, index->id);
@@ -1583,6 +1608,7 @@ btr_search_move_or_delete_hash_entries(
rw_lock_t* ahi_latch = index ? btr_get_search_latch(index) : NULL;
if (new_block->index) {
+drop_exit:
btr_search_drop_page_hash_index(block);
return;
}
@@ -1594,6 +1620,12 @@ btr_search_move_or_delete_hash_entries(
rw_lock_s_lock(ahi_latch);
if (block->index) {
+
+ if (block->index != index) {
+ rw_lock_s_unlock(ahi_latch);
+ goto drop_exit;
+ }
+
ulint n_fields = block->curr_n_fields;
ulint n_bytes = block->curr_n_bytes;
ibool left_side = block->curr_left_side;
@@ -1614,7 +1646,6 @@ btr_search_move_or_delete_hash_entries(
ut_ad(left_side == block->curr_left_side);
return;
}
-
rw_lock_s_unlock(ahi_latch);
}
@@ -1652,6 +1683,12 @@ void btr_search_update_hash_on_delete(btr_cur_t* cursor)
return;
}
+ if (index != cursor->index) {
+ ut_ad(index->id == cursor->index->id);
+ btr_search_drop_page_hash_index(block);
+ return;
+ }
+
ut_ad(block->page.id.space() == index->table->space_id);
ut_a(index == cursor->index);
ut_a(block->curr_n_fields > 0 || block->curr_n_bytes > 0);
@@ -1659,7 +1696,8 @@ void btr_search_update_hash_on_delete(btr_cur_t* cursor)
rec = btr_cur_get_rec(cursor);
- fold = rec_fold(rec, rec_get_offsets(rec, index, offsets_, true,
+ fold = rec_fold(rec, rec_get_offsets(rec, index, offsets_,
+ index->n_core_fields,
ULINT_UNDEFINED, &heap),
block->curr_n_fields, block->curr_n_bytes, index->id);
if (UNIV_LIKELY_NULL(heap)) {
@@ -1725,6 +1763,12 @@ btr_search_update_hash_node_on_insert(btr_cur_t* cursor, rw_lock_t* ahi_latch)
return;
}
+ if (cursor->index != index) {
+ ut_ad(cursor->index->id == index->id);
+ btr_search_drop_page_hash_index(block);
+ return;
+ }
+
ut_a(cursor->index == index);
ut_ad(!dict_index_is_ibuf(index));
rw_lock_x_lock(ahi_latch);
@@ -1814,6 +1858,12 @@ btr_search_update_hash_on_insert(btr_cur_t* cursor, rw_lock_t* ahi_latch)
#ifdef MYSQL_INDEX_DISABLE_AHI
ut_a(!index->disable_ahi);
#endif
+ if (index != cursor->index) {
+ ut_ad(index->id == cursor->index->id);
+ btr_search_drop_page_hash_index(block);
+ return;
+ }
+
ut_a(index == cursor->index);
ut_ad(!dict_index_is_ibuf(index));
@@ -1824,13 +1874,14 @@ btr_search_update_hash_on_insert(btr_cur_t* cursor, rw_lock_t* ahi_latch)
ins_rec = page_rec_get_next_const(rec);
next_rec = page_rec_get_next_const(ins_rec);
- offsets = rec_get_offsets(ins_rec, index, offsets, true,
+ offsets = rec_get_offsets(ins_rec, index, offsets,
+ index->n_core_fields,
ULINT_UNDEFINED, &heap);
ins_fold = rec_fold(ins_rec, offsets, n_fields, n_bytes, index->id);
if (!page_rec_is_supremum(next_rec)) {
offsets = rec_get_offsets(
- next_rec, index, offsets, true,
+ next_rec, index, offsets, index->n_core_fields,
btr_search_get_n_fields(n_fields, n_bytes), &heap);
next_fold = rec_fold(next_rec, offsets, n_fields,
n_bytes, index->id);
@@ -1842,7 +1893,7 @@ btr_search_update_hash_on_insert(btr_cur_t* cursor, rw_lock_t* ahi_latch)
if (!page_rec_is_infimum(rec) && !rec_is_metadata(rec, *index)) {
offsets = rec_get_offsets(
- rec, index, offsets, true,
+ rec, index, offsets, index->n_core_fields,
btr_search_get_n_fields(n_fields, n_bytes), &heap);
fold = rec_fold(rec, offsets, n_fields, n_bytes, index->id);
} else {
@@ -2048,7 +2099,8 @@ btr_search_hash_table_validate(ulint hash_table_id)
page_index_id = btr_page_get_index_id(block->frame);
offsets = rec_get_offsets(
- node->data, block->index, offsets, true,
+ node->data, block->index, offsets,
+ block->index->n_core_fields,
btr_search_get_n_fields(block->curr_n_fields,
block->curr_n_bytes),
&heap);
diff --git a/storage/innobase/buf/buf0buf.cc b/storage/innobase/buf/buf0buf.cc
index 1462c847e09..85bf8f2a059 100644
--- a/storage/innobase/buf/buf0buf.cc
+++ b/storage/innobase/buf/buf0buf.cc
@@ -1911,6 +1911,10 @@ buf_pool_init_instance(
ut_free(buf_pool->chunks);
buf_pool_mutex_exit(buf_pool);
+		/* InnoDB should free the mutexes that were
+		created so far before freeing the instance */
+ mutex_free(&buf_pool->mutex);
+ mutex_free(&buf_pool->zip_mutex);
return(DB_ERROR);
}
@@ -5713,6 +5717,9 @@ loop:
memset(frame + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION, 0, 8);
memset(frame + FIL_PAGE_LSN, 0, 8);
+ /* mark page as just allocated for check in
+ buf_flush_init_for_writing() */
+ ut_d(memset(frame + FIL_PAGE_SPACE_OR_CHKSUM, 0, 4));
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
ut_a(++buf_dbg_counter % 5771 || buf_validate());
diff --git a/storage/innobase/buf/buf0flu.cc b/storage/innobase/buf/buf0flu.cc
index 45c9b661d6f..2cfca67ddd1 100644
--- a/storage/innobase/buf/buf0flu.cc
+++ b/storage/innobase/buf/buf0flu.cc
@@ -783,7 +783,17 @@ buf_flush_init_for_writing(
|| &block->page.zip == page_zip_);
ut_ad(!block || newest_lsn);
ut_ad(page);
- ut_ad(!newest_lsn || fil_page_get_type(page));
+	/* The encryption key rotation procedure can write dummy log records
+	to update a page's space id, which also updates the page LSN, so an
+	additional check is needed during recovery to be sure the page is
+	freshly allocated; see buf_page_create() for such patterns */
+ ut_ad(fil_page_get_type(page)
+ || (!newest_lsn
+ || (mach_read_from_4(page + FIL_PAGE_SPACE_ID)
+ == block->page.id.space()
+ && mach_read_from_4(page + FIL_PAGE_PREV) == 0xffffffff
+ && mach_read_from_4(page + FIL_PAGE_NEXT) == 0xffffffff
+ && !mach_read_from_4(page + FIL_PAGE_SPACE_OR_CHKSUM))));
if (page_zip_) {
page_zip_des_t* page_zip;
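/* Editorial sketch, not part of the patch: the relaxed assertion above
   accepts a page with no FIL_PAGE_TYPE only while it still looks freshly
   allocated -- matching space id, FIL_PAGE_PREV/FIL_PAGE_NEXT still at
   0xffffffff, and a zeroed checksum field (see the ut_d(memset(...)) added
   in buf_page_create()).  Factored into a debug-only helper (hypothetical
   name) the condition would read: */
#ifdef UNIV_DEBUG
static bool buf_page_looks_freshly_allocated(const byte* page, ulint space_id)
{
	return mach_read_from_4(page + FIL_PAGE_SPACE_ID) == space_id
		&& mach_read_from_4(page + FIL_PAGE_PREV) == 0xffffffff
		&& mach_read_from_4(page + FIL_PAGE_NEXT) == 0xffffffff
		&& !mach_read_from_4(page + FIL_PAGE_SPACE_OR_CHKSUM);
}
#endif /* UNIV_DEBUG */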
diff --git a/storage/innobase/data/data0data.cc b/storage/innobase/data/data0data.cc
index 03c471c35fb..fe849d8ae29 100644
--- a/storage/innobase/data/data0data.cc
+++ b/storage/innobase/data/data0data.cc
@@ -686,7 +686,7 @@ dtuple_convert_big_rec(
goto skip_field;
}
- longest_i = i;
+ longest_i = i + mblob;
longest = savings;
skip_field:
@@ -767,7 +767,7 @@ void
dtuple_convert_back_big_rec(
/*========================*/
dict_index_t* index MY_ATTRIBUTE((unused)), /*!< in: index */
- dtuple_t* entry, /*!< in: entry whose data was put to vector */
+ dtuple_t* entry, /*!< in/out: entry whose data was put to vector */
big_rec_t* vector) /*!< in, own: big rec vector; it is
freed in this function */
{
diff --git a/storage/innobase/dict/dict0dict.cc b/storage/innobase/dict/dict0dict.cc
index ea551adea41..8a8095f2226 100644
--- a/storage/innobase/dict/dict0dict.cc
+++ b/storage/innobase/dict/dict0dict.cc
@@ -2,7 +2,7 @@
Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2012, Facebook Inc.
-Copyright (c) 2013, 2020, MariaDB Corporation.
+Copyright (c) 2013, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -270,7 +270,7 @@ dict_table_try_drop_aborted(
&& !UT_LIST_GET_FIRST(table->locks)) {
/* Silence a debug assertion in row_merge_drop_indexes(). */
ut_d(table->acquire());
- row_merge_drop_indexes(trx, table, TRUE);
+ row_merge_drop_indexes(trx, table, true);
ut_d(table->release());
ut_ad(table->get_ref_count() == ref_count);
trx_commit_for_mysql(trx);
@@ -4849,7 +4849,9 @@ dict_index_build_node_ptr(
dtype_set(dfield_get_type(field), DATA_SYS_CHILD, DATA_NOT_NULL, 4);
- rec_copy_prefix_to_dtuple(tuple, rec, index, !level, n_unique, heap);
+ rec_copy_prefix_to_dtuple(tuple, rec, index,
+ level ? 0 : index->n_core_fields,
+ n_unique, heap);
dtuple_set_info_bits(tuple, dtuple_get_info_bits(tuple)
| REC_STATUS_NODE_PTR);
@@ -4873,11 +4875,14 @@ dict_index_build_data_tuple(
ulint n_fields,
mem_heap_t* heap)
{
+ ut_ad(!index->is_clust());
+
dtuple_t* tuple = dtuple_create(heap, n_fields);
dict_index_copy_types(tuple, index, n_fields);
- rec_copy_prefix_to_dtuple(tuple, rec, index, leaf, n_fields, heap);
+ rec_copy_prefix_to_dtuple(tuple, rec, index,
+ leaf ? n_fields : 0, n_fields, heap);
ut_ad(dtuple_check_typed(tuple));
diff --git a/storage/innobase/dict/dict0mem.cc b/storage/innobase/dict/dict0mem.cc
index ddd2b99ef21..2741e29740a 100644
--- a/storage/innobase/dict/dict0mem.cc
+++ b/storage/innobase/dict/dict0mem.cc
@@ -2,7 +2,7 @@
Copyright (c) 1996, 2018, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2012, Facebook Inc.
-Copyright (c) 2013, 2020, MariaDB Corporation.
+Copyright (c) 2013, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -939,7 +939,7 @@ dict_mem_fill_vcol_from_v_indexes(
Later virtual column set will be
refreshed during loading of table. */
if (!dict_index_has_virtual(index)
- || index->has_new_v_col) {
+ || index->has_new_v_col()) {
continue;
}
@@ -1375,7 +1375,8 @@ dict_index_t::vers_history_row(
rec_t* clust_rec =
row_get_clust_rec(BTR_SEARCH_LEAF, rec, this, &clust_index, &mtr);
if (clust_rec) {
- offsets = rec_get_offsets(clust_rec, clust_index, offsets, true,
+ offsets = rec_get_offsets(clust_rec, clust_index, offsets,
+ clust_index->n_core_fields,
ULINT_UNDEFINED, &heap);
history_row = clust_index->vers_history_row(clust_rec, offsets);
diff --git a/storage/innobase/dict/dict0stats.cc b/storage/innobase/dict/dict0stats.cc
index dbb70ec1c6d..c5ff0c56951 100644
--- a/storage/innobase/dict/dict0stats.cc
+++ b/storage/innobase/dict/dict0stats.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 2009, 2019, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2015, 2020, MariaDB Corporation.
+Copyright (c) 2015, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -178,8 +178,8 @@ dict_stats_persistent_storage_check(
{"table_name", DATA_VARMYSQL,
DATA_NOT_NULL, 597},
- {"last_update", DATA_FIXBINARY,
- DATA_NOT_NULL, 4},
+ {"last_update", DATA_INT,
+ DATA_NOT_NULL | DATA_UNSIGNED, 4},
{"n_rows", DATA_INT,
DATA_NOT_NULL | DATA_UNSIGNED, 8},
@@ -209,8 +209,8 @@ dict_stats_persistent_storage_check(
{"index_name", DATA_VARMYSQL,
DATA_NOT_NULL, 192},
- {"last_update", DATA_FIXBINARY,
- DATA_NOT_NULL, 4},
+ {"last_update", DATA_INT,
+ DATA_NOT_NULL | DATA_UNSIGNED, 4},
{"stat_name", DATA_VARMYSQL,
DATA_NOT_NULL, 64*3},
@@ -1157,7 +1157,7 @@ dict_stats_analyze_index_level(
prev_rec_offsets = rec_get_offsets(
prev_rec, index, prev_rec_offsets,
- true,
+ index->n_core_fields,
n_uniq, &heap);
prev_rec = rec_copy_prefix_to_buf(
@@ -1169,8 +1169,9 @@ dict_stats_analyze_index_level(
continue;
}
- rec_offsets = rec_get_offsets(
- rec, index, rec_offsets, !level, n_uniq, &heap);
+ rec_offsets = rec_get_offsets(rec, index, rec_offsets,
+ level ? 0 : index->n_core_fields,
+ n_uniq, &heap);
(*total_recs)++;
@@ -1178,7 +1179,8 @@ dict_stats_analyze_index_level(
ulint matched_fields;
prev_rec_offsets = rec_get_offsets(
- prev_rec, index, prev_rec_offsets, !level,
+ prev_rec, index, prev_rec_offsets,
+ level ? 0 : index->n_core_fields,
n_uniq, &heap);
cmp_rec_rec(prev_rec, rec,
@@ -1332,7 +1334,7 @@ be big enough)
@param[in] index index of the page
@param[in] page the page to scan
@param[in] n_prefix look at the first n_prefix columns
-@param[in] is_leaf whether this is the leaf page
+@param[in] n_core 0, or index->n_core_fields for leaf
@param[out] n_diff number of distinct records encountered
@param[out] n_external_pages if this is non-NULL then it will be set
to the number of externally stored pages which were encountered
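/* Editorial sketch, not part of the patch: across this diff the former
   bool is_leaf argument of rec_get_offsets(), dict_stats_scan_page()
   and related helpers becomes an ulint n_core argument -- 0 for
   node-pointer (non-leaf) records and index->n_core_fields for
   leaf-page records.  Callers typically compute it once per page,
   exactly as the changed hunks do: */
const ulint n_core = page_is_leaf(page) ? index->n_core_fields : 0;
offsets = rec_get_offsets(rec, index, offsets, n_core,
			  ULINT_UNDEFINED, &heap);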
@@ -1347,7 +1349,7 @@ dict_stats_scan_page(
const dict_index_t* index,
const page_t* page,
ulint n_prefix,
- bool is_leaf,
+ ulint n_core,
ib_uint64_t* n_diff,
ib_uint64_t* n_external_pages)
{
@@ -1359,9 +1361,9 @@ dict_stats_scan_page(
Because offsets1,offsets2 should be big enough,
this memory heap should never be used. */
mem_heap_t* heap = NULL;
- ut_ad(is_leaf == page_is_leaf(page));
+ ut_ad(!!n_core == page_is_leaf(page));
const rec_t* (*get_next)(const rec_t*)
- = !is_leaf || srv_stats_include_delete_marked
+ = !n_core || srv_stats_include_delete_marked
? page_rec_get_next_const
: page_rec_get_next_non_del_marked;
@@ -1380,7 +1382,7 @@ dict_stats_scan_page(
return(NULL);
}
- offsets_rec = rec_get_offsets(rec, index, offsets_rec, is_leaf,
+ offsets_rec = rec_get_offsets(rec, index, offsets_rec, n_core,
ULINT_UNDEFINED, &heap);
if (should_count_external_pages) {
@@ -1397,7 +1399,7 @@ dict_stats_scan_page(
ulint matched_fields;
offsets_next_rec = rec_get_offsets(next_rec, index,
- offsets_next_rec, is_leaf,
+ offsets_next_rec, n_core,
ULINT_UNDEFINED,
&heap);
@@ -1411,7 +1413,7 @@ dict_stats_scan_page(
(*n_diff)++;
- if (!is_leaf) {
+ if (!n_core) {
break;
}
}
@@ -1497,7 +1499,7 @@ dict_stats_analyze_index_below_cur(
rec = btr_cur_get_rec(cur);
ut_ad(!page_rec_is_leaf(rec));
- offsets_rec = rec_get_offsets(rec, index, offsets1, false,
+ offsets_rec = rec_get_offsets(rec, index, offsets1, 0,
ULINT_UNDEFINED, &heap);
page_id_t page_id(index->table->space_id,
@@ -1531,7 +1533,7 @@ dict_stats_analyze_index_below_cur(
/* search for the first non-boring record on the page */
offsets_rec = dict_stats_scan_page(
&rec, offsets1, offsets2, index, page, n_prefix,
- false, n_diff, NULL);
+ 0, n_diff, NULL);
/* pages on level > 0 are not allowed to be empty */
ut_a(offsets_rec != NULL);
@@ -1576,7 +1578,7 @@ dict_stats_analyze_index_below_cur(
offsets_rec = dict_stats_scan_page(
&rec, offsets1, offsets2, index, page, n_prefix,
- true, n_diff,
+ index->n_core_fields, n_diff,
n_external_pages);
#if 0
diff --git a/storage/innobase/dict/dict0stats_bg.cc b/storage/innobase/dict/dict0stats_bg.cc
index 2d358f2c9e3..c37d89181d9 100644
--- a/storage/innobase/dict/dict0stats_bg.cc
+++ b/storage/innobase/dict/dict0stats_bg.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 2012, 2017, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, 2020, MariaDB Corporation.
+Copyright (c) 2017, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -156,9 +156,24 @@ schedule new estimates for table and index statistics to be calculated.
void dict_stats_update_if_needed_func(dict_table_t *table)
#endif
{
- ut_ad(table->stat_initialized);
ut_ad(!mutex_own(&dict_sys.mutex));
+ if (UNIV_UNLIKELY(!table->stat_initialized)) {
+ /* The table may have been evicted from dict_sys
+ and reloaded internally by InnoDB for FOREIGN KEY
+ processing, but not reloaded by the SQL layer.
+
+ We can (re)compute the transient statistics when the
+ table is actually loaded by the SQL layer.
+
+ Note: If InnoDB persistent statistics are enabled,
+ we will skip the updates. We must do this, because
+ dict_table_get_n_rows() below assumes that the
+ statistics have been initialized. The DBA may have
+ to execute ANALYZE TABLE. */
+ return;
+ }
+
ulonglong counter = table->stat_modified_counter++;
ulonglong n_rows = dict_table_get_n_rows(table);
diff --git a/storage/innobase/fil/fil0fil.cc b/storage/innobase/fil/fil0fil.cc
index 5442ee285db..c5323ed0ffc 100644
--- a/storage/innobase/fil/fil0fil.cc
+++ b/storage/innobase/fil/fil0fil.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2014, 2020, MariaDB Corporation.
+Copyright (c) 2014, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -488,12 +488,16 @@ static bool fil_node_open_file(fil_node_t* node)
const bool first_time_open = node->size == 0;
- bool o_direct_possible = !FSP_FLAGS_HAS_PAGE_COMPRESSION(space->flags);
- if (const ulint ssize = FSP_FLAGS_GET_ZIP_SSIZE(space->flags)) {
- compile_time_assert(((UNIV_ZIP_SIZE_MIN >> 1) << 3) == 4096);
- if (ssize < 3) {
- o_direct_possible = false;
- }
+ ulint type;
+ static_assert(((UNIV_ZIP_SIZE_MIN >> 1) << 3) == 4096,
+ "compatibility");
+ switch (FSP_FLAGS_GET_ZIP_SSIZE(space->flags)) {
+ case 1:
+ case 2:
+ type = OS_DATA_FILE_NO_O_DIRECT;
+ break;
+ default:
+ type = OS_DATA_FILE;
}
if (first_time_open
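/* Editorial sketch, not part of the patch: the switch above preserves
   the old rule that O_DIRECT is only attempted when the (compressed)
   page size is at least 4096 bytes.  Given the static_assert in this
   hunk, (UNIV_ZIP_SIZE_MIN >> 1) == 512, so a zip ssize of 1 or 2
   implies a 1024- or 2048-byte page and selects
   OS_DATA_FILE_NO_O_DIRECT, while ssize 0 (uncompressed) or ssize >= 3
   keeps OS_DATA_FILE.  Expressed as compile-time checks (illustrative
   only): */
static_assert(((UNIV_ZIP_SIZE_MIN >> 1) << 1) == 1024, "1K zip page");
static_assert(((UNIV_ZIP_SIZE_MIN >> 1) << 2) == 2048, "2K zip page");
static_assert(((UNIV_ZIP_SIZE_MIN >> 1) << 3) == 4096, "4K zip page");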
@@ -514,9 +518,7 @@ retry:
? OS_FILE_OPEN_RAW | OS_FILE_ON_ERROR_NO_EXIT
: OS_FILE_OPEN | OS_FILE_ON_ERROR_NO_EXIT,
OS_FILE_AIO,
- o_direct_possible
- ? OS_DATA_FILE
- : OS_DATA_FILE_NO_O_DIRECT,
+ type,
read_only_mode,
&success);
@@ -556,9 +558,7 @@ fail:
? OS_FILE_OPEN_RAW | OS_FILE_ON_ERROR_NO_EXIT
: OS_FILE_OPEN | OS_FILE_ON_ERROR_NO_EXIT,
OS_FILE_AIO,
- o_direct_possible
- ? OS_DATA_FILE
- : OS_DATA_FILE_NO_O_DIRECT,
+ type,
read_only_mode,
&success);
}
@@ -887,15 +887,14 @@ fil_space_extend_must_retry(
}
}
-/*******************************************************************//**
-Reserves the fil_system.mutex and tries to make sure we can open at least one
+/** Reserves the fil_system.mutex and tries to make sure we can open at least one
file while holding it. This should be called before calling
-fil_node_prepare_for_io(), because that function may need to open a file. */
+fil_node_prepare_for_io(), because that function may need to open a file.
+@param[in] space_id tablespace id
+@return whether the tablespace is usable for io */
static
-void
-fil_mutex_enter_and_prepare_for_io(
-/*===============================*/
- ulint space_id) /*!< in: space id */
+bool
+fil_mutex_enter_and_prepare_for_io(ulint space_id)
{
for (ulint count = 0;;) {
mutex_enter(&fil_system.mutex);
@@ -908,7 +907,7 @@ fil_mutex_enter_and_prepare_for_io(
fil_space_t* space = fil_space_get_by_id(space_id);
if (space == NULL) {
- break;
+ return false;
}
fil_node_t* node = UT_LIST_GET_LAST(space->chain);
@@ -923,6 +922,10 @@ fil_mutex_enter_and_prepare_for_io(
the insert buffer. The insert buffer is in
tablespace 0, and we cannot end up waiting in
this function. */
+ } else if (space->is_stopping() && !space->is_being_truncated) {
+ /* If the tablespace is being deleted then InnoDB
+ shouldn't prepare the tablespace for i/o */
+ return false;
} else if (!node || node->is_open()) {
/* If the file is already open, no need to do
anything; if the space does not exist, we handle the
@@ -994,6 +997,8 @@ fil_mutex_enter_and_prepare_for_io(
break;
}
+
+ return true;
}
/** Try to extend a tablespace if it is smaller than the specified size.
@@ -1010,7 +1015,10 @@ fil_space_extend(
bool success;
do {
- fil_mutex_enter_and_prepare_for_io(space->id);
+ if (!fil_mutex_enter_and_prepare_for_io(space->id)) {
+ success = false;
+ break;
+ }
} while (fil_space_extend_must_retry(
space, UT_LIST_GET_LAST(space->chain), size,
&success));
@@ -1365,7 +1373,9 @@ fil_space_t* fil_system_t::read_page0(ulint id)
/* It is possible that the tablespace is dropped while we are
not holding the mutex. */
- fil_mutex_enter_and_prepare_for_io(id);
+ if (!fil_mutex_enter_and_prepare_for_io(id)) {
+ return NULL;
+ }
fil_space_t* space = fil_space_get_by_id(id);
@@ -2802,7 +2812,6 @@ fil_rename_tablespace(
ut_ad(strchr(new_file_name, OS_PATH_SEPARATOR) != NULL);
if (!recv_recovery_is_on()) {
- fil_name_write_rename(id, old_file_name, new_file_name);
log_mutex_enter();
}
@@ -2895,13 +2904,22 @@ fil_ibd_create(
return NULL;
}
+ ulint type;
+ static_assert(((UNIV_ZIP_SIZE_MIN >> 1) << 3) == 4096,
+ "compatibility");
+ switch (FSP_FLAGS_GET_ZIP_SSIZE(flags)) {
+ case 1:
+ case 2:
+ type = OS_DATA_FILE_NO_O_DIRECT;
+ break;
+ default:
+ type = OS_DATA_FILE;
+ }
+
file = os_file_create(
innodb_data_file_key, path,
OS_FILE_CREATE | OS_FILE_ON_ERROR_NO_EXIT,
- OS_FILE_NORMAL,
- OS_DATA_FILE,
- srv_read_only_mode,
- &success);
+ OS_FILE_NORMAL, type, srv_read_only_mode, &success);
if (!success) {
/* The following call will print an error message */
diff --git a/storage/innobase/fts/fts0fts.cc b/storage/innobase/fts/fts0fts.cc
index 4bdc556cee8..dbfd37544e6 100644
--- a/storage/innobase/fts/fts0fts.cc
+++ b/storage/innobase/fts/fts0fts.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 2011, 2018, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2016, 2020, MariaDB Corporation.
+Copyright (c) 2016, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -1708,7 +1708,7 @@ fts_drop_tables(
error = fts_drop_common_tables(trx, &fts_table);
- if (error == DB_SUCCESS) {
+ if (error == DB_SUCCESS && table->fts) {
error = fts_drop_all_index_tables(trx, table->fts);
}
@@ -1730,7 +1730,7 @@ fts_create_in_mem_aux_table(
dict_table_t* new_table = dict_mem_table_create(
aux_table_name, NULL, n_cols, 0, table->flags,
table->space_id == TRX_SYS_SPACE
- ? 0 : table->space->purpose == FIL_TYPE_TEMPORARY
+ ? 0 : table->space_id == SRV_TMP_SPACE_ID
? DICT_TF2_TEMPORARY : DICT_TF2_USE_FILE_PER_TABLE);
if (DICT_TF_HAS_DATA_DIR(table->flags)) {
@@ -2518,7 +2518,8 @@ fts_get_max_cache_size(
}
} else {
ib::error() << "(" << error << ") reading max"
- " cache config value from config table";
+ " cache config value from config table "
+ << fts_table->table->name;
}
ut_free(value.f_str);
@@ -2691,7 +2692,8 @@ func_exit:
} else {
*doc_id = 0;
- ib::error() << "(" << error << ") while getting next doc id.";
+ ib::error() << "(" << error << ") while getting next doc id "
+ "for table " << table->name;
fts_sql_rollback(trx);
if (error == DB_DEADLOCK) {
@@ -2771,7 +2773,8 @@ fts_update_sync_doc_id(
cache->synced_doc_id = doc_id;
} else {
ib::error() << "(" << error << ") while"
- " updating last doc id.";
+			" updating last doc id for table "
+ << table->name;
fts_sql_rollback(trx);
}
@@ -3482,7 +3485,8 @@ fts_add_doc_by_id(
}
- offsets = rec_get_offsets(clust_rec, clust_index, NULL, true,
+ offsets = rec_get_offsets(clust_rec, clust_index, NULL,
+ clust_index->n_core_fields,
ULINT_UNDEFINED, &heap);
for (ulint i = 0; i < num_idx; ++i) {
@@ -3996,7 +4000,8 @@ fts_sync_write_words(
if (UNIV_UNLIKELY(error != DB_SUCCESS) && !print_error) {
ib::error() << "(" << error << ") writing"
- " word node to FTS auxiliary index table.";
+ " word node to FTS auxiliary index table "
+ << table->name;
print_error = TRUE;
}
}
@@ -4151,7 +4156,8 @@ fts_sync_commit(
fts_sql_commit(trx);
} else {
fts_sql_rollback(trx);
- ib::error() << "(" << error << ") during SYNC.";
+ ib::error() << "(" << error << ") during SYNC of "
+ "table " << sync->table->name;
}
if (UNIV_UNLIKELY(fts_enable_diag_print) && elapsed_time) {
@@ -4922,7 +4928,8 @@ fts_get_rows_count(
trx->error_state = DB_SUCCESS;
} else {
ib::error() << "(" << error
- << ") while reading FTS table.";
+ << ") while reading FTS table "
+ << table_name;
break; /* Exit the loop. */
}
diff --git a/storage/innobase/gis/gis0rtree.cc b/storage/innobase/gis/gis0rtree.cc
index 170fb2e8a57..122402eb34d 100644
--- a/storage/innobase/gis/gis0rtree.cc
+++ b/storage/innobase/gis/gis0rtree.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2018, 2020, MariaDB Corporation.
+Copyright (c) 2018, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -87,8 +87,9 @@ rtr_page_split_initialize_nodes(
stop = task + n_recs;
rec = page_rec_get_next(page_get_infimum_rec(page));
- const bool is_leaf = page_is_leaf(page);
- *offsets = rec_get_offsets(rec, cursor->index, *offsets, is_leaf,
+ const ulint n_core = page_is_leaf(page)
+ ? cursor->index->n_core_fields : 0;
+ *offsets = rec_get_offsets(rec, cursor->index, *offsets, n_core,
n_uniq, &heap);
source_cur = rec_get_nth_field(rec, *offsets, 0, &len);
@@ -101,7 +102,7 @@ rtr_page_split_initialize_nodes(
rec = page_rec_get_next(rec);
*offsets = rec_get_offsets(rec, cursor->index, *offsets,
- is_leaf, n_uniq, &heap);
+ n_core, n_uniq, &heap);
source_cur = rec_get_nth_field(rec, *offsets, 0, &len);
}
@@ -308,7 +309,8 @@ rtr_update_mbr_field(
page_zip = buf_block_get_page_zip(block);
child = btr_node_ptr_get_child_page_no(rec, offsets);
- const bool is_leaf = page_is_leaf(block->frame);
+ const ulint n_core = page_is_leaf(block->frame)
+ ? index->n_core_fields : 0;
if (new_rec) {
child_rec = new_rec;
@@ -324,7 +326,7 @@ rtr_update_mbr_field(
if (cursor2) {
rec_t* del_rec = btr_cur_get_rec(cursor2);
offsets2 = rec_get_offsets(btr_cur_get_rec(cursor2),
- index, NULL, false,
+ index, NULL, 0,
ULINT_UNDEFINED, &heap);
del_page_no = btr_node_ptr_get_child_page_no(del_rec, offsets2);
cur2_pos = page_rec_get_n_recs_before(btr_cur_get_rec(cursor2));
@@ -389,7 +391,7 @@ rtr_update_mbr_field(
= page_rec_get_nth(page, cur2_pos);
}
offsets2 = rec_get_offsets(btr_cur_get_rec(cursor2),
- index, NULL, false,
+ index, NULL, 0,
ULINT_UNDEFINED, &heap);
ut_ad(del_page_no == btr_node_ptr_get_child_page_no(
cursor2->page_cur.rec,
@@ -427,7 +429,7 @@ rtr_update_mbr_field(
ut_ad(old_rec != insert_rec);
page_cur_position(old_rec, block, &page_cur);
- offsets2 = rec_get_offsets(old_rec, index, NULL, is_leaf,
+ offsets2 = rec_get_offsets(old_rec, index, NULL, n_core,
ULINT_UNDEFINED, &heap);
page_cur_delete_rec(&page_cur, index, offsets2, mtr);
@@ -457,7 +459,7 @@ update_mbr:
cur2_rec = cursor2->page_cur.rec;
offsets2 = rec_get_offsets(cur2_rec, index, NULL,
- is_leaf,
+ n_core,
ULINT_UNDEFINED, &heap);
cur2_rec_info = rec_get_info_bits(cur2_rec,
@@ -517,7 +519,7 @@ update_mbr:
if (ins_suc) {
btr_cur_position(index, insert_rec, block, cursor);
offsets = rec_get_offsets(insert_rec,
- index, offsets, is_leaf,
+ index, offsets, n_core,
ULINT_UNDEFINED, &heap);
}
@@ -532,7 +534,7 @@ update_mbr:
cur2_rec = btr_cur_get_rec(cursor2);
offsets2 = rec_get_offsets(cur2_rec, index, NULL,
- is_leaf,
+ n_core,
ULINT_UNDEFINED, &heap);
/* If the cursor2 position is on a wrong rec, we
@@ -546,7 +548,7 @@ update_mbr:
while (!page_rec_is_supremum(cur2_rec)) {
offsets2 = rec_get_offsets(cur2_rec, index,
NULL,
- is_leaf,
+ n_core,
ULINT_UNDEFINED,
&heap);
cur2_pno = btr_node_ptr_get_child_page_no(
@@ -836,7 +838,8 @@ rtr_split_page_move_rec_list(
rec_move = static_cast<rtr_rec_move_t*>(mem_heap_alloc(
heap,
sizeof (*rec_move) * max_to_move));
- const bool is_leaf = page_is_leaf(page);
+ const ulint n_core = page_is_leaf(page)
+ ? index->n_core_fields : 0;
/* Insert the recs in group 2 to new page. */
for (cur_split_node = node_array;
@@ -846,10 +849,10 @@ rtr_split_page_move_rec_list(
block, cur_split_node->key);
offsets = rec_get_offsets(cur_split_node->key,
- index, offsets, is_leaf,
+ index, offsets, n_core,
ULINT_UNDEFINED, &heap);
- ut_ad(!is_leaf || cur_split_node->key != first_rec);
+ ut_ad(!n_core || cur_split_node->key != first_rec);
rec = page_cur_insert_rec_low(
page_cur_get_rec(&new_page_cursor),
@@ -884,7 +887,7 @@ rtr_split_page_move_rec_list(
same temp-table in parallel.
max_trx_id is ignored for temp tables because it not required
for MVCC. */
- if (is_leaf && !index->table->is_temporary()) {
+ if (n_core && !index->table->is_temporary()) {
page_update_max_trx_id(new_block, NULL,
page_get_max_trx_id(page),
mtr);
@@ -937,7 +940,7 @@ rtr_split_page_move_rec_list(
block, &page_cursor);
offsets = rec_get_offsets(
page_cur_get_rec(&page_cursor), index,
- offsets, is_leaf, ULINT_UNDEFINED,
+ offsets, n_core, ULINT_UNDEFINED,
&heap);
page_cur_delete_rec(&page_cursor,
index, offsets, mtr);
@@ -1136,6 +1139,9 @@ func_start:
/* Update the lock table */
lock_rtr_move_rec_list(new_block, block, rec_move, moved);
+ const ulint n_core = page_level
+ ? 0 : cursor->index->n_core_fields;
+
/* Delete recs in first group from the new page. */
for (cur_split_node = rtr_split_node_array;
cur_split_node < end_split_node - 1; ++cur_split_node) {
@@ -1154,7 +1160,7 @@ func_start:
*offsets = rec_get_offsets(
page_cur_get_rec(page_cursor),
- cursor->index, *offsets, !page_level,
+ cursor->index, *offsets, n_core,
ULINT_UNDEFINED, heap);
page_cur_delete_rec(page_cursor,
@@ -1171,7 +1177,7 @@ func_start:
block, page_cursor);
*offsets = rec_get_offsets(
page_cur_get_rec(page_cursor),
- cursor->index, *offsets, !page_level,
+ cursor->index, *offsets, n_core,
ULINT_UNDEFINED, heap);
page_cur_delete_rec(page_cursor,
cursor->index, *offsets, mtr);
@@ -1400,7 +1406,8 @@ rtr_page_copy_rec_list_end_no_locks(
rec_offs offsets_2[REC_OFFS_NORMAL_SIZE];
rec_offs* offsets2 = offsets_2;
ulint moved = 0;
- bool is_leaf = page_is_leaf(new_page);
+ const ulint n_core = page_is_leaf(new_page)
+ ? index->n_core_fields : 0;
rec_offs_init(offsets_1);
rec_offs_init(offsets_2);
@@ -1429,14 +1436,14 @@ rtr_page_copy_rec_list_end_no_locks(
cur_rec = page_rec_get_next(cur_rec);
}
- offsets1 = rec_get_offsets(cur1_rec, index, offsets1, is_leaf,
+ offsets1 = rec_get_offsets(cur1_rec, index, offsets1, n_core,
ULINT_UNDEFINED, &heap);
while (!page_rec_is_supremum(cur_rec)) {
ulint cur_matched_fields = 0;
int cmp;
offsets2 = rec_get_offsets(cur_rec, index, offsets2,
- is_leaf,
+ n_core,
ULINT_UNDEFINED, &heap);
cmp = cmp_rec_rec(cur1_rec, cur_rec,
offsets1, offsets2, index, false,
@@ -1448,7 +1455,7 @@ rtr_page_copy_rec_list_end_no_locks(
/* Skip small recs. */
page_cur_move_to_next(&page_cur);
cur_rec = page_cur_get_rec(&page_cur);
- } else if (is_leaf) {
+ } else if (n_core) {
if (rec_get_deleted_flag(cur1_rec,
dict_table_is_comp(index->table))) {
goto next;
@@ -1471,7 +1478,7 @@ rtr_page_copy_rec_list_end_no_locks(
cur_rec = page_cur_get_rec(&page_cur);
- offsets1 = rec_get_offsets(cur1_rec, index, offsets1, is_leaf,
+ offsets1 = rec_get_offsets(cur1_rec, index, offsets1, n_core,
ULINT_UNDEFINED, &heap);
ins_rec = page_cur_insert_rec_low(cur_rec, index,
@@ -1527,7 +1534,8 @@ rtr_page_copy_rec_list_start_no_locks(
rec_offs* offsets2 = offsets_2;
page_cur_t page_cur;
ulint moved = 0;
- bool is_leaf = page_is_leaf(buf_block_get_frame(block));
+ const ulint n_core = page_is_leaf(buf_block_get_frame(block))
+ ? index->n_core_fields : 0;
rec_offs_init(offsets_1);
rec_offs_init(offsets_2);
@@ -1547,14 +1555,14 @@ rtr_page_copy_rec_list_start_no_locks(
cur_rec = page_rec_get_next(cur_rec);
}
- offsets1 = rec_get_offsets(cur1_rec, index, offsets1, is_leaf,
+ offsets1 = rec_get_offsets(cur1_rec, index, offsets1, n_core,
ULINT_UNDEFINED, &heap);
while (!page_rec_is_supremum(cur_rec)) {
ulint cur_matched_fields = 0;
offsets2 = rec_get_offsets(cur_rec, index, offsets2,
- is_leaf,
+ n_core,
ULINT_UNDEFINED, &heap);
int cmp = cmp_rec_rec(cur1_rec, cur_rec,
offsets1, offsets2, index, false,
@@ -1567,7 +1575,7 @@ rtr_page_copy_rec_list_start_no_locks(
/* Skip small recs. */
page_cur_move_to_next(&page_cur);
cur_rec = page_cur_get_rec(&page_cur);
- } else if (is_leaf) {
+ } else if (n_core) {
if (rec_get_deleted_flag(
cur1_rec,
dict_table_is_comp(index->table))) {
@@ -1591,7 +1599,7 @@ rtr_page_copy_rec_list_start_no_locks(
cur_rec = page_cur_get_rec(&page_cur);
- offsets1 = rec_get_offsets(cur1_rec, index, offsets1, is_leaf,
+ offsets1 = rec_get_offsets(cur1_rec, index, offsets1, n_core,
ULINT_UNDEFINED, &heap);
ins_rec = page_cur_insert_rec_low(cur_rec, index,
@@ -1745,7 +1753,7 @@ rtr_check_same_block(
while (!page_rec_is_supremum(rec)) {
offsets = rec_get_offsets(
- rec, index, NULL, false, ULINT_UNDEFINED, &heap);
+ rec, index, NULL, 0, ULINT_UNDEFINED, &heap);
if (btr_node_ptr_get_child_page_no(rec, offsets) == page_no) {
btr_cur_position(index, rec, parentb, cursor);
diff --git a/storage/innobase/gis/gis0sea.cc b/storage/innobase/gis/gis0sea.cc
index 849e080728f..18f75e3d139 100644
--- a/storage/innobase/gis/gis0sea.cc
+++ b/storage/innobase/gis/gis0sea.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 2016, 2018, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, 2020, MariaDB Corporation.
+Copyright (c) 2017, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -530,8 +530,7 @@ rtr_compare_cursor_rec(
rec = btr_cur_get_rec(cursor);
- offsets = rec_get_offsets(
- rec, index, NULL, false, ULINT_UNDEFINED, heap);
+ offsets = rec_get_offsets(rec, index, NULL, 0, ULINT_UNDEFINED, heap);
return(btr_node_ptr_get_child_page_no(rec, offsets) == page_no);
}
@@ -836,7 +835,8 @@ rtr_page_get_father_node_ptr(
user_rec = btr_cur_get_rec(cursor);
ut_a(page_rec_is_user_rec(user_rec));
- offsets = rec_get_offsets(user_rec, index, offsets, !level,
+ offsets = rec_get_offsets(user_rec, index, offsets,
+ level ? 0 : index->n_fields,
ULINT_UNDEFINED, &heap);
rtr_get_mbr_from_rec(user_rec, offsets, &mbr);
@@ -853,7 +853,7 @@ rtr_page_get_father_node_ptr(
node_ptr = btr_cur_get_rec(cursor);
ut_ad(!page_rec_is_comp(node_ptr)
|| rec_get_status(node_ptr) == REC_STATUS_NODE_PTR);
- offsets = rec_get_offsets(node_ptr, index, offsets, false,
+ offsets = rec_get_offsets(node_ptr, index, offsets, 0,
ULINT_UNDEFINED, &heap);
ulint child_page = btr_node_ptr_get_child_page_no(node_ptr, offsets);
@@ -871,13 +871,14 @@ rtr_page_get_father_node_ptr(
print_rec = page_rec_get_next(
page_get_infimum_rec(page_align(user_rec)));
offsets = rec_get_offsets(print_rec, index, offsets,
- page_rec_is_leaf(user_rec),
+ page_rec_is_leaf(user_rec)
+ ? index->n_fields : 0,
ULINT_UNDEFINED, &heap);
error << "; child ";
rec_print(error.m_oss, print_rec,
rec_get_info_bits(print_rec, rec_offs_comp(offsets)),
offsets);
- offsets = rec_get_offsets(node_ptr, index, offsets, false,
+ offsets = rec_get_offsets(node_ptr, index, offsets, 0,
ULINT_UNDEFINED, &heap);
error << "; parent ";
rec_print(error.m_oss, print_rec,
@@ -1309,10 +1310,12 @@ rtr_cur_restore_position(
heap = mem_heap_create(256);
offsets1 = rec_get_offsets(
- r_cursor->old_rec, index, NULL, !level,
+ r_cursor->old_rec, index, NULL,
+ level ? 0 : r_cursor->old_n_fields,
r_cursor->old_n_fields, &heap);
offsets2 = rec_get_offsets(
- rec, index, NULL, !level,
+ rec, index, NULL,
+ level ? 0 : r_cursor->old_n_fields,
r_cursor->old_n_fields, &heap);
comp = rec_offs_comp(offsets1);
@@ -1379,12 +1382,12 @@ search_again:
rec = btr_pcur_get_rec(r_cursor);
- offsets1 = rec_get_offsets(
- r_cursor->old_rec, index, NULL, !level,
- r_cursor->old_n_fields, &heap);
- offsets2 = rec_get_offsets(
- rec, index, NULL, !level,
- r_cursor->old_n_fields, &heap);
+ offsets1 = rec_get_offsets(r_cursor->old_rec, index, NULL,
+ level ? 0 : r_cursor->old_n_fields,
+ r_cursor->old_n_fields, &heap);
+ offsets2 = rec_get_offsets(rec, index, NULL,
+ level ? 0 : r_cursor->old_n_fields,
+ r_cursor->old_n_fields, &heap);
comp = rec_offs_comp(offsets1);
@@ -1673,7 +1676,7 @@ rtr_cur_search_with_match(
page = buf_block_get_frame(block);
const ulint level = btr_page_get_level(page);
- const bool is_leaf = !level;
+ const ulint n_core = level ? 0 : index->n_fields;
if (mode == PAGE_CUR_RTREE_LOCATE) {
ut_ad(level != 0);
@@ -1695,7 +1698,7 @@ rtr_cur_search_with_match(
ulint new_rec_size = rec_get_converted_size(index, tuple, 0);
- offsets = rec_get_offsets(rec, index, offsets, is_leaf,
+ offsets = rec_get_offsets(rec, index, offsets, n_core,
dtuple_get_n_fields_cmp(tuple),
&heap);
@@ -1716,10 +1719,10 @@ rtr_cur_search_with_match(
}
while (!page_rec_is_supremum(rec)) {
- offsets = rec_get_offsets(rec, index, offsets, is_leaf,
+ offsets = rec_get_offsets(rec, index, offsets, n_core,
dtuple_get_n_fields_cmp(tuple),
&heap);
- if (!is_leaf) {
+ if (!n_core) {
switch (mode) {
case PAGE_CUR_CONTAIN:
case PAGE_CUR_INTERSECT:
@@ -1800,7 +1803,7 @@ rtr_cur_search_with_match(
to rtr_info->path for non-leaf nodes, or
rtr_info->matches for leaf nodes */
if (rtr_info && mode != PAGE_CUR_RTREE_INSERT) {
- if (!is_leaf) {
+ if (!n_core) {
ulint page_no;
node_seq_t new_seq;
bool is_loc;
@@ -1811,7 +1814,7 @@ rtr_cur_search_with_match(
== PAGE_CUR_RTREE_GET_FATHER);
offsets = rec_get_offsets(
- rec, index, offsets, false,
+ rec, index, offsets, 0,
ULINT_UNDEFINED, &heap);
page_no = btr_node_ptr_get_child_page_no(
@@ -1860,7 +1863,8 @@ rtr_cur_search_with_match(
/* Collect matched records on page */
offsets = rec_get_offsets(
- rec, index, offsets, true,
+ rec, index, offsets,
+ index->n_fields,
ULINT_UNDEFINED, &heap);
rtr_leaf_push_match_rec(
rec, rtr_info, offsets,
@@ -1883,7 +1887,7 @@ rtr_cur_search_with_match(
/* All records on page are searched */
if (page_rec_is_supremum(rec)) {
- if (!is_leaf) {
+ if (!n_core) {
if (!found) {
/* No match case, if it is for insertion,
then we select the record that result in
@@ -1893,7 +1897,7 @@ rtr_cur_search_with_match(
ut_ad(least_inc < DBL_MAX);
offsets = rec_get_offsets(
best_rec, index, offsets,
- false, ULINT_UNDEFINED, &heap);
+ 0, ULINT_UNDEFINED, &heap);
child_no =
btr_node_ptr_get_child_page_no(
best_rec, offsets);
@@ -1945,11 +1949,11 @@ rtr_cur_search_with_match(
/* Verify the record to be positioned is the same
as the last record in matched_rec vector */
offsets2 = rec_get_offsets(test_rec.r_rec, index,
- offsets2, true,
+ offsets2, index->n_fields,
ULINT_UNDEFINED, &heap);
offsets = rec_get_offsets(last_match_rec, index,
- offsets, true,
+ offsets, index->n_fields,
ULINT_UNDEFINED, &heap);
ut_ad(cmp_rec_rec(test_rec.r_rec, last_match_rec,
@@ -1966,9 +1970,8 @@ rtr_cur_search_with_match(
ulint child_no;
ut_ad(!last_match_rec && rec);
- offsets = rec_get_offsets(
- rec, index, offsets, false,
- ULINT_UNDEFINED, &heap);
+ offsets = rec_get_offsets(rec, index, offsets, 0,
+ ULINT_UNDEFINED, &heap);
child_no = btr_node_ptr_get_child_page_no(rec, offsets);
@@ -1976,7 +1979,7 @@ rtr_cur_search_with_match(
index, rtr_info->parent_path, level, child_no,
block, rec, 0);
- } else if (rtr_info && found && !is_leaf) {
+ } else if (rtr_info && found && !n_core) {
rec = last_match_rec;
}
@@ -1986,11 +1989,11 @@ rtr_cur_search_with_match(
#ifdef UNIV_DEBUG
/* Verify that we are positioned at the same child page as pushed in
the path stack */
- if (!is_leaf && (!page_rec_is_supremum(rec) || found)
+ if (!n_core && (!page_rec_is_supremum(rec) || found)
&& mode != PAGE_CUR_RTREE_INSERT) {
ulint page_no;
- offsets = rec_get_offsets(rec, index, offsets, false,
+ offsets = rec_get_offsets(rec, index, offsets, 0,
ULINT_UNDEFINED, &heap);
page_no = btr_node_ptr_get_child_page_no(rec, offsets);
diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc
index 345f2a37d5d..68b7a018821 100644
--- a/storage/innobase/handler/ha_innodb.cc
+++ b/storage/innobase/handler/ha_innodb.cc
@@ -4,7 +4,7 @@ Copyright (c) 2000, 2020, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2008, 2009 Google Inc.
Copyright (c) 2009, Percona Inc.
Copyright (c) 2012, Facebook Inc.
-Copyright (c) 2013, 2020, MariaDB Corporation.
+Copyright (c) 2013, 2021, MariaDB Corporation.
Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
@@ -62,6 +62,7 @@ this program; if not, write to the Free Software Foundation, Inc.,
#include <my_service_manager.h>
#include <key.h>
+#include <sql_manager.h>
/* Include necessary InnoDB headers */
#include "btr0btr.h"
@@ -200,7 +201,6 @@ static char* innodb_large_prefix;
stopword table to be used */
static char* innobase_server_stopword_table;
-static my_bool innobase_use_atomic_writes;
static my_bool innobase_use_checksums;
static my_bool innobase_locks_unsafe_for_binlog;
static my_bool innobase_rollback_on_timeout;
@@ -1849,9 +1849,7 @@ thd_to_trx_id(
return(thd_to_trx(thd)->id);
}
-static int
-wsrep_abort_transaction(handlerton* hton, THD *bf_thd, THD *victim_thd,
- my_bool signal);
+static void wsrep_abort_transaction(handlerton*, THD *, THD *, my_bool);
static int innobase_wsrep_set_checkpoint(handlerton* hton, const XID* xid);
static int innobase_wsrep_get_checkpoint(handlerton* hton, XID* xid);
#endif /* WITH_WSREP */
@@ -2478,6 +2476,72 @@ innobase_raw_format(
return(ut_str_sql_format(buf_tmp, buf_tmp_used, buf, buf_size));
}
+/*
+The helper function nlz(x) calculates the number of leading zeros
+in the binary representation of the number "x", either using a
+built-in compiler function or a substitute trick based on the use
+of the multiplication operation and a table indexed by the prefix
+of the multiplication result:
+*/
+#ifdef __GNUC__
+#define nlz(x) __builtin_clzll(x)
+#elif defined(_MSC_VER) && !defined(_M_CEE_PURE) && \
+ (defined(_M_IX86) || defined(_M_X64) || defined(_M_ARM64))
+#ifndef __INTRIN_H_
+#pragma warning(push, 4)
+#pragma warning(disable: 4255 4668)
+#include <intrin.h>
+#pragma warning(pop)
+#endif
+__forceinline unsigned int nlz (ulonglong x)
+{
+#if defined(_M_IX86) || defined(_M_X64)
+ unsigned long n;
+#ifdef _M_X64
+ _BitScanReverse64(&n, x);
+ return (unsigned int) n ^ 63;
+#else
+ unsigned long y = (unsigned long) (x >> 32);
+ unsigned int m = 31;
+ if (y == 0)
+ {
+ y = (unsigned long) x;
+ m = 63;
+ }
+ _BitScanReverse(&n, y);
+ return (unsigned int) n ^ m;
+#endif
+#elif defined(_M_ARM64)
+ return _CountLeadingZeros(x);
+#endif
+}
+#else
+inline unsigned int nlz (ulonglong x)
+{
+ static unsigned char table [48] = {
+ 32, 6, 5, 0, 4, 12, 0, 20,
+ 15, 3, 11, 0, 0, 18, 25, 31,
+ 8, 14, 2, 0, 10, 0, 0, 0,
+ 0, 0, 0, 21, 0, 0, 19, 26,
+ 7, 0, 13, 0, 16, 1, 22, 27,
+ 9, 0, 17, 23, 28, 24, 29, 30
+ };
+ unsigned int y= (unsigned int) (x >> 32);
+ unsigned int n= 0;
+ if (y == 0) {
+ y= (unsigned int) x;
+ n= 32;
+ }
+ y = y | (y >> 1); // Propagate leftmost 1-bit to the right.
+ y = y | (y >> 2);
+ y = y | (y >> 4);
+ y = y | (y >> 8);
+ y = y & ~(y >> 16);
+ y = y * 0x3EF5D037;
+ return n + table[y >> 26];
+}
+#endif
+
/*********************************************************************//**
Compute the next autoinc value.
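
For reference, a minimal portable sketch of the nlz() contract relied on above (count of
leading zero bits in a 64-bit value), written as a plain binary search instead of the
builtin or multiply-and-table variants in the patch. nlz_portable is a hypothetical helper
name; the x == 0 guard is only for completeness, since the callers below never pass zero.

#include <cassert>

static inline unsigned nlz_portable(unsigned long long x)
{
	if (x == 0) return 64;                       /* callers below never pass 0 */
	unsigned n = 0;
	if (x <= 0x00000000FFFFFFFFULL) { n += 32; x <<= 32; }
	if (x <= 0x0000FFFFFFFFFFFFULL) { n += 16; x <<= 16; }
	if (x <= 0x00FFFFFFFFFFFFFFULL) { n +=  8; x <<=  8; }
	if (x <= 0x0FFFFFFFFFFFFFFFULL) { n +=  4; x <<=  4; }
	if (x <= 0x3FFFFFFFFFFFFFFFULL) { n +=  2; x <<=  2; }
	if (x <= 0x7FFFFFFFFFFFFFFFULL) { n +=  1; }
	return n;
}

int main()
{
	assert(nlz_portable(1) == 63);               /* only bit 0 set */
	assert(nlz_portable(1ULL << 63) == 0);       /* top bit set */
	assert(nlz_portable(0xFFULL) == 56);         /* 8 significant bits */
	return 0;
}
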
@@ -2506,85 +2570,93 @@ innobase_next_autoinc(
ulonglong max_value) /*!< in: max value for type */
{
ulonglong next_value;
- ulonglong block = need * step;
+ ulonglong block;
/* Should never be 0. */
ut_a(need > 0);
- ut_a(block > 0);
+ ut_a(step > 0);
ut_a(max_value > 0);
- /*
- Allow auto_increment to go over max_value up to max ulonglong.
- This allows us to detect that all values are exhausted.
- If we don't do this, we will return max_value several times
- and get duplicate key errors instead of auto increment value
- out of range.
- */
- max_value= (~(ulonglong) 0);
+ /*
+ We need to calculate the "block" value equal to the product
+ "step * need". However, when calculating this product, an integer
+ overflow can occur, so we cannot simply use the usual multiplication
+ operation. The snippet below calculates the product of two numbers
+ and detects an unsigned integer overflow:
+ */
+ unsigned int m= nlz(need);
+ unsigned int n= nlz(step);
+ if (m + n <= 8 * sizeof(ulonglong) - 2) {
+ // The bit width of the original values is too large,
+ // therefore we are guaranteed to get an overflow.
+ goto overflow;
+ }
+ block = need * (step >> 1);
+ if ((longlong) block < 0) {
+ goto overflow;
+ }
+ block += block;
+ if (step & 1) {
+ block += need;
+ if (block < need) {
+ goto overflow;
+ }
+ }
+
+ /* Check for overflow. Current can be > max_value if the value
+ is in reality a negative value. Also, the visual studio compiler
+ converts large double values (which hypothetically can then be
+ passed here as the values of the "current" parameter) automatically
+ into unsigned long long datatype maximum value: */
+ if (current > max_value) {
+ goto overflow;
+ }
/* According to MySQL documentation, if the offset is greater than
the step then the offset is ignored. */
- if (offset > block) {
+ if (offset > step) {
offset = 0;
}
- /* Check for overflow. Current can be > max_value if the value is
- in reality a negative value.The visual studio compilers converts
- large double values automatically into unsigned long long datatype
- maximum value */
-
- if (block >= max_value
- || offset > max_value
- || current >= max_value
- || max_value - offset <= offset) {
-
- next_value = max_value;
+ /*
+ Let's round the current value to within a step-size block:
+ */
+ if (current > offset) {
+ next_value = current - offset;
} else {
- ut_a(max_value > current);
-
- ulonglong free = max_value - current;
-
- if (free < offset || free - offset <= block) {
- next_value = max_value;
- } else {
- next_value = 0;
- }
+ next_value = offset - current;
}
+ next_value -= next_value % step;
- if (next_value == 0) {
- ulonglong next;
-
- if (current > offset) {
- next = (current - offset) / step;
- } else {
- next = (offset - current) / step;
- }
-
- ut_a(max_value > next);
- next_value = next * step;
- /* Check for multiplication overflow. */
- ut_a(next_value >= next);
- ut_a(max_value > next_value);
-
- /* Check for overflow */
- if (max_value - next_value >= block) {
-
- next_value += block;
-
- if (max_value - next_value >= offset) {
- next_value += offset;
- } else {
- next_value = max_value;
- }
- } else {
- next_value = max_value;
- }
+ /*
+ Add an offset to the next value and check that the addition
+ does not cause an integer overflow:
+ */
+ next_value += offset;
+ if (next_value < offset) {
+ goto overflow;
}
- ut_a(next_value != 0);
- ut_a(next_value <= max_value);
+ /*
+ Add a block to the next value and check that the addition
+ does not cause an integer overflow:
+ */
+ next_value += block;
+ if (next_value < block) {
+ goto overflow;
+ }
return(next_value);
+
+overflow:
+ /*
+ Allow auto_increment to go over max_value up to max ulonglong.
+ This allows us to detect that all values are exhausted.
+ If we don't do this, we will return max_value several times
+ and get duplicate key errors instead of auto increment value
+ out of range:
+ */
+ return(~(ulonglong) 0);
}
/********************************************************************//**
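
To make the rewritten overflow handling above concrete, here is a self-contained sketch
(not the server code; next_autoinc_sketch is a hypothetical name and nlz is assumed to be
a thin wrapper over the GCC/Clang builtin) that mirrors the same steps: pre-check the
combined bit width of need and step, compute block = need * step with explicit wrap
detection, round current onto the offset/step grid, and collapse any overflow to the
all-ones sentinel that the caller treats as "out of range".

#include <cassert>

typedef unsigned long long ulonglong;

static inline unsigned nlz(ulonglong x) { return (unsigned) __builtin_clzll(x); }

static ulonglong next_autoinc_sketch(ulonglong current, ulonglong need,
				     ulonglong step, ulonglong offset,
				     ulonglong max_value)
{
	const ulonglong all_ones = ~(ulonglong) 0;   /* overflow sentinel */

	/* need and step together carry more than 64 significant bits:
	the product is guaranteed to overflow. */
	if (nlz(need) + nlz(step) <= 8 * sizeof(ulonglong) - 2)
		return all_ones;

	/* block = need * step, detecting wrap-around explicitly. */
	ulonglong block = need * (step >> 1);
	if ((long long) block < 0)                   /* doubling would wrap */
		return all_ones;
	block += block;
	if (step & 1) {
		block += need;
		if (block < need)                    /* addition wrapped */
			return all_ones;
	}

	if (current > max_value)
		return all_ones;

	if (offset > step)                           /* oversized offset is ignored */
		offset = 0;

	/* Round current onto the step grid anchored at offset. */
	ulonglong next = current > offset ? current - offset : offset - current;
	next -= next % step;

	next += offset;
	if (next < offset)
		return all_ones;

	next += block;                               /* reserve "need" values */
	if (next < block)
		return all_ones;

	return next;
}

int main()
{
	const ulonglong max = ~(ulonglong) 0;
	assert(next_autoinc_sketch(10, 1, 5, 3, max) == 13);  /* grid 3, 8, 13, ... */
	assert(next_autoinc_sketch(2, 1, 5, 10, max) == 5);   /* offset > step: ignored */
	assert(next_autoinc_sketch(5, 1ULL << 33, 1ULL << 33, 0, max) == max);
	return 0;
}

With current = 10, step = 5 and offset = 3 the admissible values are 3, 8, 13, ..., so
reserving one value past 10 yields 13; the last assertion shows the guaranteed-overflow
pre-check firing, since need and step carry 34 significant bits each.
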
@@ -3473,10 +3545,12 @@ ha_innobase::init_table_handle_for_HANDLER(void)
reset_template();
}
-/** Free tablespace resources allocated. */
-void innobase_space_shutdown()
+/*********************************************************************//**
+Free any resources that were allocated and return failure.
+@return always return 1 */
+static int innodb_init_abort()
{
- DBUG_ENTER("innobase_space_shutdown");
+ DBUG_ENTER("innodb_init_abort");
if (fil_system.temp_space) {
fil_system.temp_space->close();
@@ -3491,19 +3565,21 @@ void innobase_space_shutdown()
#ifdef WITH_INNODB_DISALLOW_WRITES
os_event_destroy(srv_allow_writes_event);
#endif /* WITH_INNODB_DISALLOW_WRITES */
-
- DBUG_VOID_RETURN;
-}
-
-/** Free any resources that were allocated and return failure.
-@return always return 1 */
-static int innodb_init_abort()
-{
- DBUG_ENTER("innodb_init_abort");
- innobase_space_shutdown();
DBUG_RETURN(1);
}
+/** Deprecation message about innodb_idle_flush_pct */
+static const char* deprecated_idle_flush_pct
+ = "innodb_idle_flush_pct is DEPRECATED and has no effect.";
+
+static const char* deprecated_innodb_checksum_algorithm
+ = "Setting innodb_checksum_algorithm to values other than"
+ " crc32, full_crc32, strict_crc32 or strict_full_crc32"
+ " is UNSAFE and DEPRECATED."
+ " These deprecated values will be disallowed in MariaDB 10.6.";
+
+static ulong innodb_idle_flush_pct;
+
/** If applicable, emit a message that log checksums cannot be disabled.
@param[in,out] thd client session, or NULL if at startup
@param[in] check whether redo log block checksums are enabled
@@ -3530,6 +3606,23 @@ innodb_log_checksums_func_update(THD* thd, bool check)
return(check);
}
+static void innodb_checksum_algorithm_update(THD *thd, st_mysql_sys_var*,
+ void *, const void *save)
+{
+ srv_checksum_algorithm= *static_cast<const ulong*>(save);
+ switch (srv_checksum_algorithm) {
+ case SRV_CHECKSUM_ALGORITHM_CRC32:
+ case SRV_CHECKSUM_ALGORITHM_STRICT_CRC32:
+ case SRV_CHECKSUM_ALGORITHM_FULL_CRC32:
+ case SRV_CHECKSUM_ALGORITHM_STRICT_FULL_CRC32:
+ break;
+ default:
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
+ HA_ERR_UNSUPPORTED,
+ deprecated_innodb_checksum_algorithm);
+ }
+}
+
/****************************************************************//**
Gives the file extension of an InnoDB single-table tablespace. */
static const char* ha_innobase_exts[] = {
@@ -3954,9 +4047,18 @@ static int innodb_init_params()
if (!innobase_use_checksums) {
ib::warn() << "Setting innodb_checksums to OFF is DEPRECATED."
- " This option may be removed in future releases. You"
- " should set innodb_checksum_algorithm=NONE instead.";
+ " This option was removed in MariaDB 10.5.";
srv_checksum_algorithm = SRV_CHECKSUM_ALGORITHM_NONE;
+ } else {
+ switch (srv_checksum_algorithm) {
+ case SRV_CHECKSUM_ALGORITHM_CRC32:
+ case SRV_CHECKSUM_ALGORITHM_STRICT_CRC32:
+ case SRV_CHECKSUM_ALGORITHM_FULL_CRC32:
+ case SRV_CHECKSUM_ALGORITHM_STRICT_FULL_CRC32:
+ break;
+ default:
+ ib::warn() << deprecated_innodb_checksum_algorithm;
+ }
}
innodb_log_checksums = innodb_log_checksums_func_update(
@@ -4010,21 +4112,16 @@ static int innodb_init_params()
innobase_commit_concurrency_init_default();
- srv_use_atomic_writes
- = innobase_use_atomic_writes && my_may_have_atomic_write;
- if (srv_use_atomic_writes && !srv_file_per_table)
- {
- fprintf(stderr, "InnoDB: Disabling atomic_writes as file_per_table is not used.\n");
- srv_use_atomic_writes= 0;
- }
+ if (innodb_idle_flush_pct != 100) {
+ ib::warn() << deprecated_idle_flush_pct;
+ }
- if (srv_use_atomic_writes) {
- fprintf(stderr, "InnoDB: using atomic writes.\n");
+#ifndef _WIN32
+ if (srv_use_atomic_writes && my_may_have_atomic_write) {
/*
Force O_DIRECT on Unixes (on Windows writes are always
unbuffered)
*/
-#ifndef _WIN32
switch (innodb_flush_method) {
case SRV_O_DIRECT:
case SRV_O_DIRECT_NO_FSYNC:
@@ -4033,8 +4130,8 @@ static int innodb_init_params()
innodb_flush_method = SRV_O_DIRECT;
fprintf(stderr, "InnoDB: using O_DIRECT due to atomic writes.\n");
}
-#endif
}
+#endif
if (srv_read_only_mode) {
ib::info() << "Started in read only mode";
@@ -4122,8 +4219,10 @@ static int innodb_init(void* p)
innobase_hton->flush_logs = innobase_flush_logs;
innobase_hton->show_status = innobase_show_status;
innobase_hton->flags =
- HTON_SUPPORTS_EXTENDED_KEYS | HTON_SUPPORTS_FOREIGN_KEYS
- | HTON_NATIVE_SYS_VERSIONING | HTON_WSREP_REPLICATION;
+ HTON_SUPPORTS_EXTENDED_KEYS | HTON_SUPPORTS_FOREIGN_KEYS |
+ HTON_NATIVE_SYS_VERSIONING |
+ HTON_WSREP_REPLICATION |
+ HTON_REQUIRES_CLOSE_AFTER_TRUNCATE;
#ifdef WITH_WSREP
innobase_hton->abort_transaction=wsrep_abort_transaction;
@@ -4309,7 +4408,6 @@ innobase_end(handlerton*, ha_panic_function)
}
innodb_shutdown();
- innobase_space_shutdown();
mysql_mutex_destroy(&commit_cond_m);
mysql_cond_destroy(&commit_cond);
@@ -5037,6 +5135,7 @@ innobase_close_connection(
if (trx) {
+ thd_set_ha_data(thd, hton, NULL);
if (!trx_is_registered_for_2pc(trx) && trx_is_started(trx)) {
sql_print_error("Transaction not registered for MariaDB 2PC, "
@@ -5075,7 +5174,7 @@ rollback_and_free:
DBUG_RETURN(0);
}
-UNIV_INTERN void lock_cancel_waiting_and_release(lock_t* lock);
+void lock_cancel_waiting_and_release(lock_t *lock);
/** Cancel any pending lock request associated with the current THD.
@sa THD::awake() @sa ha_kill_query() */
@@ -5085,6 +5184,7 @@ static void innobase_kill_query(handlerton*, THD *thd, enum thd_kill_levels)
if (trx_t* trx= thd_to_trx(thd))
{
+ ut_ad(trx->mysql_thd == thd);
#ifdef WITH_WSREP
if (trx->is_wsrep() && wsrep_thd_is_aborting(thd))
/* if victim has been signaled by BF thread and/or aborting is already
@@ -5093,28 +5193,15 @@ static void innobase_kill_query(handlerton*, THD *thd, enum thd_kill_levels)
DBUG_VOID_RETURN;
#endif /* WITH_WSREP */
lock_mutex_enter();
- mutex_enter(&trx_sys.mutex);
- trx_mutex_enter(trx);
- /* It is possible that innobase_close_connection() is concurrently
- being executed on our victim. Even if the trx object is later
- reused for another client connection or a background transaction,
- its trx->mysql_thd will differ from our thd.
-
- trx_t::state changes are protected by trx_t::mutex, and
- trx_sys.trx_list is protected by trx_sys.mutex, in
- both trx_create() and trx_t::free().
-
- At this point, trx may have been reallocated for another client
- connection, or for a background operation. In that case, either
- trx_t::state or trx_t::mysql_thd should not match our expectations. */
- bool cancel= trx->mysql_thd == thd && trx->state == TRX_STATE_ACTIVE &&
- !trx->lock.was_chosen_as_deadlock_victim;
- mutex_exit(&trx_sys.mutex);
- if (!cancel);
- else if (lock_t *lock= trx->lock.wait_lock)
+ if (lock_t *lock= trx->lock.wait_lock)
+ {
+ trx_mutex_enter(trx);
+ if (trx->is_wsrep() && wsrep_thd_is_aborting(thd))
+ trx->lock.was_chosen_as_deadlock_victim= TRUE;
lock_cancel_waiting_and_release(lock);
+ trx_mutex_exit(trx);
+ }
lock_mutex_exit();
- trx_mutex_exit(trx);
}
DBUG_VOID_RETURN;
@@ -6108,13 +6195,6 @@ ha_innobase::open(const char* name, int, uint)
innobase_copy_frm_flags_from_table_share(ib_table, table->s);
- /* No point to init any statistics if tablespace is still encrypted. */
- if (ib_table->is_readable()) {
- dict_stats_init(ib_table);
- } else {
- ib_table->stat_initialized = 1;
- }
-
MONITOR_INC(MONITOR_TABLE_OPEN);
if ((ib_table->flags2 & DICT_TF2_DISCARDED)) {
@@ -6307,11 +6387,14 @@ ha_innobase::open(const char* name, int, uint)
}
}
- if (table && m_prebuilt->table) {
- ut_ad(table->versioned() == m_prebuilt->table->versioned());
+ ut_ad(!m_prebuilt->table
+ || table->versioned() == m_prebuilt->table->versioned());
+
+ if (!THDVAR(thd, background_thread)) {
+ info(HA_STATUS_NO_LOCK | HA_STATUS_VARIABLE | HA_STATUS_CONST
+ | HA_STATUS_OPEN);
}
- info(HA_STATUS_NO_LOCK | HA_STATUS_VARIABLE | HA_STATUS_CONST | HA_STATUS_OPEN);
DBUG_RETURN(0);
}
@@ -6520,8 +6603,8 @@ wsrep_innobase_mysql_sort(
case MYSQL_TYPE_LONG_BLOB:
case MYSQL_TYPE_VARCHAR:
{
- uchar tmp_str[REC_VERSION_56_MAX_INDEX_COL_LEN] = {'\0'};
- uint tmp_length = REC_VERSION_56_MAX_INDEX_COL_LEN;
+ uchar *tmp_str;
+ uint tmp_length;
/* Use the charset number to pick the right charset struct for
the comparison. Since the MySQL function get_charset may be
@@ -6544,7 +6627,11 @@ wsrep_innobase_mysql_sort(
}
}
- ut_a(str_length <= tmp_length);
+ // Note that strnxfrm may change length of string
+ tmp_length= charset->coll->strnxfrmlen(charset, str_length);
+ tmp_length= ut_max(str_length, tmp_length) + 1;
+ tmp_str= static_cast<uchar *>(ut_malloc_nokey(tmp_length));
+ ut_ad(str_length <= tmp_length);
memcpy(tmp_str, str, str_length);
tmp_length = charset->coll->strnxfrm(charset, str, str_length,
@@ -6568,6 +6655,7 @@ wsrep_innobase_mysql_sort(
ret_length = tmp_length;
}
+ ut_free(tmp_str);
break;
}
case MYSQL_TYPE_DECIMAL :
@@ -6919,7 +7007,7 @@ wsrep_store_key_val_for_row(
THD* thd,
TABLE* table,
uint keynr, /*!< in: key number */
- char* buff, /*!< in/out: buffer for the key value (in MySQL
+ uchar* buff, /*!< in/out: buffer for the key value (in MySQL
format) */
uint buff_len,/*!< in: buffer length */
const uchar* record,
@@ -6928,7 +7016,7 @@ wsrep_store_key_val_for_row(
KEY* key_info = table->key_info + keynr;
KEY_PART_INFO* key_part = key_info->key_part;
KEY_PART_INFO* end = key_part + key_info->user_defined_key_parts;
- char* buff_start = buff;
+ uchar* buff_start = buff;
enum_field_types mysql_type;
Field* field;
uint buff_space = buff_len;
@@ -6940,7 +7028,8 @@ wsrep_store_key_val_for_row(
for (; key_part != end; key_part++) {
- uchar sorted[REC_VERSION_56_MAX_INDEX_COL_LEN] = {'\0'};
+ uchar *sorted=NULL;
+ uint max_len=0;
ibool part_is_null = FALSE;
if (key_part->null_bit) {
@@ -7019,10 +7108,14 @@ wsrep_store_key_val_for_row(
true_len = key_len;
}
+ max_len= true_len;
+ sorted= static_cast<uchar *>(ut_malloc_nokey(max_len+1));
memcpy(sorted, data, true_len);
true_len = wsrep_innobase_mysql_sort(
mysql_type, cs->number, sorted, true_len,
- REC_VERSION_56_MAX_INDEX_COL_LEN);
+ max_len);
+ ut_ad(true_len <= max_len);
+
if (wsrep_protocol_version > 1) {
/* Note that we always reserve the maximum possible
length of the true VARCHAR in the key value, though
@@ -7107,11 +7200,13 @@ wsrep_store_key_val_for_row(
true_len = key_len;
}
+ max_len= true_len;
+ sorted= static_cast<uchar *>(ut_malloc_nokey(max_len+1));
memcpy(sorted, blob_data, true_len);
true_len = wsrep_innobase_mysql_sort(
mysql_type, cs->number, sorted, true_len,
- REC_VERSION_56_MAX_INDEX_COL_LEN);
-
+ max_len);
+ ut_ad(true_len <= max_len);
/* Note that we always reserve the maximum possible
length of the BLOB prefix in the key value. */
@@ -7187,10 +7282,14 @@ wsrep_store_key_val_for_row(
cs->mbmaxlen),
&error);
}
+
+ max_len= true_len;
+ sorted= static_cast<uchar *>(ut_malloc_nokey(max_len+1));
memcpy(sorted, src_start, true_len);
true_len = wsrep_innobase_mysql_sort(
mysql_type, cs->number, sorted, true_len,
- REC_VERSION_56_MAX_INDEX_COL_LEN);
+ max_len);
+ ut_ad(true_len <= max_len);
if (true_len > buff_space) {
fprintf (stderr,
@@ -7205,6 +7304,11 @@ wsrep_store_key_val_for_row(
buff += true_len;
buff_space -= true_len;
}
+
+ if (sorted) {
+ ut_free(sorted);
+ sorted= NULL;
+ }
}
ut_a(buff <= buff_start + buff_len);
@@ -8014,7 +8118,6 @@ ha_innobase::write_row(
/* Handling of errors related to auto-increment. */
if (auto_inc_used) {
ulonglong auto_inc;
- ulonglong col_max_value;
/* Note the number of rows processed for this statement, used
by get_auto_increment() to determine the number of AUTO-INC
@@ -8024,11 +8127,6 @@ ha_innobase::write_row(
--trx->n_autoinc_rows;
}
- /* We need the upper limit of the col type to check for
- whether we update the table autoinc counter or not. */
- col_max_value =
- table->next_number_field->get_max_int_value();
-
/* Get the value that MySQL attempted to store in the table.*/
auto_inc = table->next_number_field->val_uint();
@@ -8095,36 +8193,25 @@ ha_innobase::write_row(
if (auto_inc >= m_prebuilt->autoinc_last_value) {
set_max_autoinc:
+ /* We need the upper limit of the col type to check for
+ whether we update the table autoinc counter or not. */
+ ulonglong col_max_value =
+ table->next_number_field->get_max_int_value();
+
/* This should filter out the negative
values set explicitly by the user. */
if (auto_inc <= col_max_value) {
+ ut_ad(m_prebuilt->autoinc_increment > 0);
ulonglong offset;
ulonglong increment;
dberr_t err;
-#ifdef WITH_WSREP
- /* Applier threads which are processing
- ROW events and don't go through server
- level autoinc processing, therefore
- m_prebuilt autoinc values don't get
- properly assigned. Fetch values from
- server side. */
- if (trx->is_wsrep() &&
- wsrep_thd_is_applying(m_user_thd))
- {
- wsrep_thd_auto_increment_variables(
- m_user_thd, &offset, &increment);
- }
- else
-#endif /* WITH_WSREP */
- {
- ut_a(m_prebuilt->autoinc_increment > 0);
- offset = m_prebuilt->autoinc_offset;
- increment = m_prebuilt->autoinc_increment;
- }
+
+ offset = m_prebuilt->autoinc_offset;
+ increment = m_prebuilt->autoinc_increment;
+
auto_inc = innobase_next_autoinc(
- auto_inc,
- 1, increment, offset,
+ auto_inc, 1, increment, offset,
col_max_value);
err = innobase_set_max_autoinc(
@@ -8652,6 +8739,8 @@ wsrep_calc_row_hash(
for (uint i = 0; i < table->s->fields; i++) {
byte null_byte=0;
byte true_byte=1;
+ ulint col_type;
+ ulint is_unsigned;
const Field* field = table->field[i];
if (!field->stored_in_db()) {
@@ -8660,8 +8749,9 @@ wsrep_calc_row_hash(
ptr = (const byte*) row + get_field_offset(table, field);
len = field->pack_length();
+ col_type = get_innobase_type_from_mysql_type(&is_unsigned, field);
- switch (prebuilt->table->cols[i].mtype) {
+ switch (col_type) {
case DATA_BLOB:
ptr = row_mysql_read_blob_ref(&len, ptr, len);
@@ -8779,6 +8869,20 @@ ha_innobase::update_row(
MySQL that the row is not really updated and it
should not increase the count of updated rows.
This is fix for http://bugs.mysql.com/29157 */
+ if (m_prebuilt->versioned_write
+ && thd_sql_command(m_user_thd) != SQLCOM_ALTER_TABLE
+ /* Multiple UPDATE of same rows in single transaction create
+ historical rows only once. */
+ && trx->id != table->vers_start_id()) {
+ error = row_insert_for_mysql((byte*) old_row,
+ m_prebuilt,
+ ROW_INS_HISTORICAL);
+ if (error != DB_SUCCESS) {
+ goto func_exit;
+ }
+ innobase_srv_conc_exit_innodb(m_prebuilt);
+ innobase_active_small();
+ }
DBUG_RETURN(HA_ERR_RECORD_IS_THE_SAME);
} else {
const bool vers_set_fields = m_prebuilt->versioned_write
@@ -8812,39 +8916,37 @@ ha_innobase::update_row(
/* A value for an AUTO_INCREMENT column
was specified in the UPDATE statement. */
- ulonglong offset;
- ulonglong increment;
-#ifdef WITH_WSREP
- /* Applier threads which are processing
- ROW events and don't go through server
- level autoinc processing, therefore
- m_prebuilt autoinc values don't get
- properly assigned. Fetch values from
- server side. */
- if (trx->is_wsrep() && wsrep_thd_is_applying(m_user_thd))
- wsrep_thd_auto_increment_variables(
- m_user_thd, &offset, &increment);
- else
-#endif /* WITH_WSREP */
- offset = m_prebuilt->autoinc_offset,
- increment = m_prebuilt->autoinc_increment;
-
- autoinc = innobase_next_autoinc(
- autoinc, 1, increment, offset,
- table->found_next_number_field->get_max_int_value());
-
- error = innobase_set_max_autoinc(autoinc);
-
- if (m_prebuilt->table->persistent_autoinc) {
- /* Update the PAGE_ROOT_AUTO_INC. Yes, we do
- this even if dict_table_t::autoinc already was
- greater than autoinc, because we cannot know
- if any INSERT actually used (and wrote to
- PAGE_ROOT_AUTO_INC) a value bigger than our
- autoinc. */
- btr_write_autoinc(dict_table_get_first_index(
- m_prebuilt->table),
- autoinc);
+ /* We need the upper limit of the col type to check for
+ whether we update the table autoinc counter or not. */
+ ulonglong col_max_value =
+ table->found_next_number_field->get_max_int_value();
+
+ /* This should filter out the negative
+ values set explicitly by the user. */
+ if (autoinc <= col_max_value) {
+ ulonglong offset;
+ ulonglong increment;
+
+ offset = m_prebuilt->autoinc_offset;
+ increment = m_prebuilt->autoinc_increment;
+
+ autoinc = innobase_next_autoinc(
+ autoinc, 1, increment, offset,
+ col_max_value);
+
+ error = innobase_set_max_autoinc(autoinc);
+
+ if (m_prebuilt->table->persistent_autoinc) {
+ /* Update the PAGE_ROOT_AUTO_INC. Yes, we do
+ this even if dict_table_t::autoinc already was
+ greater than autoinc, because we cannot know
+ if any INSERT actually used (and wrote to
+ PAGE_ROOT_AUTO_INC) a value bigger than our
+ autoinc. */
+ btr_write_autoinc(dict_table_get_first_index(
+ m_prebuilt->table),
+ autoinc);
+ }
}
}
@@ -10295,7 +10397,7 @@ wsrep_append_key(
THD *thd,
trx_t *trx,
TABLE_SHARE *table_share,
- const char* key,
+ const uchar* key,
uint16_t key_len,
Wsrep_service_key_type key_type /*!< in: access type of this key
(shared, exclusive, semi...) */
@@ -10407,8 +10509,8 @@ ha_innobase::wsrep_append_keys(
if (wsrep_protocol_version == 0) {
uint len;
- char keyval[WSREP_MAX_SUPPORTED_KEY_LENGTH+1] = {'\0'};
- char *key = &keyval[0];
+ uchar keyval[WSREP_MAX_SUPPORTED_KEY_LENGTH+1] = {'\0'};
+ uchar *key = &keyval[0];
ibool is_null;
len = wsrep_store_key_val_for_row(
@@ -10449,12 +10551,12 @@ ha_innobase::wsrep_append_keys(
/* keyval[] shall contain an ordinal number at byte 0
and the actual key data shall be written at byte 1.
Hence the total data length is the key length + 1 */
- char keyval0[WSREP_MAX_SUPPORTED_KEY_LENGTH+1] = {'\0'};
- char keyval1[WSREP_MAX_SUPPORTED_KEY_LENGTH+1] = {'\0'};
- keyval0[0] = (char)i;
- keyval1[0] = (char)i;
- char* key0 = &keyval0[1];
- char* key1 = &keyval1[1];
+ uchar keyval0[WSREP_MAX_SUPPORTED_KEY_LENGTH+1]= {'\0'};
+ uchar keyval1[WSREP_MAX_SUPPORTED_KEY_LENGTH+1]= {'\0'};
+ keyval0[0] = (uchar)i;
+ keyval1[0] = (uchar)i;
+ uchar* key0 = &keyval0[1];
+ uchar* key1 = &keyval1[1];
if (!tab) {
WSREP_WARN("MariaDB-InnoDB key mismatch %s %s",
@@ -10528,22 +10630,20 @@ ha_innobase::wsrep_append_keys(
/* if no PK, calculate hash of full row, to be the key value */
if (!key_appended && wsrep_certify_nonPK) {
uchar digest[16];
- int rcode;
wsrep_calc_row_hash(digest, record0, table, m_prebuilt);
- if ((rcode = wsrep_append_key(thd, trx, table_share,
- (const char*) digest, 16,
- key_type))) {
+ if (int rcode = wsrep_append_key(thd, trx, table_share,
+ digest, 16, key_type)) {
DBUG_RETURN(rcode);
}
if (record1) {
wsrep_calc_row_hash(
digest, record1, table, m_prebuilt);
- if ((rcode = wsrep_append_key(thd, trx, table_share,
- (const char*) digest,
- 16, key_type))) {
+ if (int rcode = wsrep_append_key(thd, trx, table_share,
+ digest, 16,
+ key_type)) {
DBUG_RETURN(rcode);
}
}
@@ -13333,17 +13433,10 @@ innobase_drop_database(
@param[in,out] trx InnoDB data dictionary transaction
@param[in] from old table name
@param[in] to new table name
-@param[in] commit whether to commit trx
-@param[in] use_fk whether to parse and enforce FOREIGN KEY constraints
+@param[in] commit whether to commit trx (and to enforce FOREIGN KEY)
@return DB_SUCCESS or error code */
-inline
-dberr_t
-innobase_rename_table(
- trx_t* trx,
- const char* from,
- const char* to,
- bool commit,
- bool use_fk)
+inline dberr_t innobase_rename_table(trx_t *trx, const char *from,
+ const char *to, bool commit)
{
dberr_t error;
char norm_to[FN_REFLEN];
@@ -13376,6 +13469,9 @@ innobase_rename_table(
Convert lock_wait_timeout unit from second to 250 milliseconds */
long int lock_wait_timeout = thd_lock_wait_timeout(trx->mysql_thd) * 4;
if (table != NULL) {
+ if (commit) {
+ dict_stats_wait_bg_to_stop_using_table(table, trx);
+ }
for (dict_index_t* index = dict_table_get_first_index(table);
index != NULL;
index = dict_table_get_next_index(index)) {
@@ -13389,7 +13485,9 @@ innobase_rename_table(
}
}
}
- dict_table_close(table, TRUE, FALSE);
+ if (!commit) {
+ dict_table_close(table, TRUE, FALSE);
+ }
}
/* FTS sync is in progress. We shall timeout this operation */
@@ -13399,7 +13497,7 @@ innobase_rename_table(
}
error = row_rename_table_for_mysql(norm_from, norm_to, trx, commit,
- use_fk);
+ commit);
if (error != DB_SUCCESS) {
if (error == DB_TABLE_NOT_FOUND
@@ -13451,6 +13549,10 @@ innobase_rename_table(
func_exit:
if (commit) {
+ if (table) {
+ table->stats_bg_flag &= ~BG_STAT_SHOULD_QUIT;
+ dict_table_close(table, TRUE, FALSE);
+ }
row_mysql_unlock_data_dictionary(trx);
}
@@ -13505,9 +13607,11 @@ int ha_innobase::truncate()
++trx->will_lock;
trx_set_dict_operation(trx, TRX_DICT_OP_TABLE);
row_mysql_lock_data_dictionary(trx);
+ dict_stats_wait_bg_to_stop_using_table(ib_table, trx);
+
int err = convert_error_code_to_mysql(
innobase_rename_table(trx, ib_table->name.m_name, temp_name,
- false, false),
+ false),
ib_table->flags, m_user_thd);
if (err) {
trx_rollback_for_mysql(trx);
@@ -13590,7 +13694,7 @@ ha_innobase::rename_table(
++trx->will_lock;
trx_set_dict_operation(trx, TRX_DICT_OP_INDEX);
- dberr_t error = innobase_rename_table(trx, from, to, true, true);
+ dberr_t error = innobase_rename_table(trx, from, to, true);
DEBUG_SYNC(thd, "after_innobase_rename_table");
@@ -14137,6 +14241,10 @@ ha_innobase::info_low(
ib_table = m_prebuilt->table;
DBUG_ASSERT(ib_table->get_ref_count() > 0);
+ if (!ib_table->is_readable()) {
+ ib_table->stat_initialized = true;
+ }
+
if (flag & HA_STATUS_TIME) {
if (is_analyze || innobase_stats_on_metadata) {
@@ -14148,6 +14256,13 @@ ha_innobase::info_low(
if (dict_stats_is_persistent_enabled(ib_table)) {
if (is_analyze) {
+ row_mysql_lock_data_dictionary(
+ m_prebuilt->trx);
+ dict_stats_recalc_pool_del(ib_table);
+ dict_stats_wait_bg_to_stop_using_table(
+ ib_table, m_prebuilt->trx);
+ row_mysql_unlock_data_dictionary(
+ m_prebuilt->trx);
opt = DICT_STATS_RECALC_PERSISTENT;
} else {
/* This is e.g. 'SHOW INDEXES', fetch
@@ -14160,6 +14275,13 @@ ha_innobase::info_low(
ret = dict_stats_update(ib_table, opt);
+ if (opt == DICT_STATS_RECALC_PERSISTENT) {
+ mutex_enter(&dict_sys.mutex);
+ ib_table->stats_bg_flag
+ &= byte(~BG_STAT_SHOULD_QUIT);
+ mutex_exit(&dict_sys.mutex);
+ }
+
if (ret != DB_SUCCESS) {
m_prebuilt->trx->op_info = "";
DBUG_RETURN(HA_ERR_GENERIC);
@@ -14175,6 +14297,8 @@ ha_innobase::info_low(
DBUG_EXECUTE_IF("dict_sys_mutex_avoid", goto func_exit;);
+ dict_stats_init(ib_table);
+
if (flag & HA_STATUS_VARIABLE) {
ulint stat_clustered_index_size;
@@ -15317,10 +15441,6 @@ ha_innobase::extra(
case HA_EXTRA_END_ALTER_COPY:
m_prebuilt->table->skip_alter_undo = 0;
break;
- case HA_EXTRA_FAKE_START_STMT:
- trx_register_for_2pc(m_prebuilt->trx);
- m_prebuilt->sql_stat_start = true;
- break;
default:/* Do nothing */
;
}
@@ -17120,7 +17240,8 @@ innodb_io_capacity_update(
" higher than innodb_io_capacity_max %lu",
in_val, srv_max_io_capacity);
- srv_max_io_capacity = in_val * 2;
+ srv_max_io_capacity = (in_val & ~(~0UL >> 1))
+ ? in_val : in_val * 2;
push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_WRONG_ARGUMENTS,
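
The updated assignment above avoids wrapping srv_max_io_capacity when doubling:
in_val & ~(~0UL >> 1) simply tests the most significant bit, and if it is set the value is
kept as-is instead of being doubled. A tiny illustration in plain C++ (hypothetical values,
not server code):

#include <cassert>

int main()
{
	unsigned long v_small = 100, v_huge = ~0UL - 1;
	/* ~(~0UL >> 1) is a mask with only the top bit set. */
	assert((v_small & ~(~0UL >> 1)) == 0);   /* top bit clear: safe to double */
	assert((v_huge  & ~(~0UL >> 1)) != 0);   /* top bit set: doubling would wrap */
	unsigned long capped = (v_huge & ~(~0UL >> 1)) ? v_huge : v_huge * 2;
	assert(capped == v_huge);                /* keep the value instead of wrapping */
	return 0;
}
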
@@ -18559,54 +18680,59 @@ static struct st_mysql_storage_engine innobase_storage_engine=
#ifdef WITH_WSREP
-/** This function is used to kill one transaction.
-
-This transaction was open on this node (not-yet-committed), and a
-conflicting writeset from some other node that was being applied
-caused a locking conflict. First committed (from other node)
-wins, thus open transaction is rolled back. BF stands for
-brute-force: any transaction can get aborted by galera any time
-it is necessary.
+struct bg_wsrep_kill_trx_arg {
+ my_thread_id thd_id, bf_thd_id;
+ trx_id_t trx_id, bf_trx_id;
+ bool signal;
+};
-This conflict can happen only when the replicated writeset (from
-other node) is being applied, not when it’s waiting in the queue.
-If our local transaction reached its COMMIT and this conflicting
-writeset was in the queue, then it should fail the local
-certification test instead.
+/** Kill one transaction from a background manager thread
-A brute force abort is only triggered by a locking conflict
-between a writeset being applied by an applier thread (slave thread)
-and an open transaction on the node, not by a Galera writeset
-comparison as in the local certification failure.
+wsrep_innobase_kill_one_trx() is invoked when lock_sys.mutex and trx mutex
+are taken, wsrep_thd_bf_abort() cannot be used there as it takes THD mutexes
+that must be taken before lock_sys.mutex and trx mutex. That's why
+wsrep_innobase_kill_one_trx only posts the killing task to the manager thread
+and the actual killing happens asynchronously here.
-@param[in] bf_thd Brute force (BF) thread
-@param[in,out] victim_trx Vimtim trx to be killed
-@param[in] signal Should victim be signaled */
-UNIV_INTERN
-int
-wsrep_innobase_kill_one_trx(
- THD* bf_thd,
- trx_t *victim_trx,
- bool signal)
+As no mutexes were held we don't know whether THD or trx pointers are still
+valid, so we need to pass thread/trx ids and perform a lookup.
+*/
+static void bg_wsrep_kill_trx(void *void_arg)
{
- ut_ad(bf_thd);
- ut_ad(victim_trx);
- ut_ad(lock_mutex_own());
- ut_ad(trx_mutex_own(victim_trx));
+ bg_wsrep_kill_trx_arg *arg= (bg_wsrep_kill_trx_arg *)void_arg;
+ THD *thd, *bf_thd;
+ trx_t *victim_trx;
+ bool aborting= false;
- DBUG_ENTER("wsrep_innobase_kill_one_trx");
+ if ((bf_thd= find_thread_by_id(arg->bf_thd_id)))
+ wsrep_thd_LOCK(bf_thd);
+ if ((thd= find_thread_by_id(arg->thd_id)))
+ wsrep_thd_LOCK(thd);
- THD *thd= (THD *) victim_trx->mysql_thd;
- ut_ad(thd);
- /* Note that bf_trx might not exist here e.g. on MDL conflict
- case (test: galera_concurrent_ctas). Similarly, BF thread
- could be also acquiring MDL-lock causing victim to be
- aborted. However, we have not yet called innobase_trx_init()
- for BF transaction (test: galera_many_columns)*/
- trx_t* bf_trx= thd_to_trx(bf_thd);
- DBUG_ASSERT(wsrep_on(bf_thd));
+ if (!thd || !bf_thd || !(victim_trx= thd_to_trx(thd)))
+ goto ret0;
+
+ lock_mutex_enter();
+ trx_mutex_enter(victim_trx);
+ if (victim_trx->id != arg->trx_id
+ || victim_trx->state == TRX_STATE_COMMITTED_IN_MEMORY)
+ {
+ /* Apparently victim trx was meanwhile rolled back or
+ committed. Tell bf thd not to wait, in case it already
+ started to. */
+ trx_t *trx= thd_to_trx(bf_thd);
+ if (!trx) {
+ /* bf_thd might not be associated with a
+ transaction, in case of MDL conflict */
+ } else if (lock_t *lock = trx->lock.wait_lock) {
+ trx_mutex_enter(trx);
+ lock_cancel_waiting_and_release(lock);
+ trx_mutex_exit(trx);
+ }
+ goto ret1;
+ }
- wsrep_thd_LOCK(thd);
+ DBUG_ASSERT(wsrep_on(bf_thd));
WSREP_LOG_CONFLICT(bf_thd, thd, TRUE);
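
A minimal sketch of the deferral pattern described in the new comment: the caller, which
still holds lock_sys.mutex and the trx mutex, only snapshots ids, and the background worker
re-resolves and re-validates them before acting. Session, KillArg, bg_kill and the
std::thread worker are toy stand-ins, not the MariaDB manager-thread API.

#include <cassert>
#include <map>
#include <mutex>
#include <thread>

struct Session { unsigned long long trx_id; bool killed; };

static std::mutex registry_mutex;
static std::map<unsigned long, Session> registry;       /* thd_id -> session */

struct KillArg { unsigned long thd_id; unsigned long long trx_id; };

/* Runs on a background thread: because only ids were captured, the session and
its transaction are looked up again and validated before anything is touched. */
static void bg_kill(KillArg arg)
{
	std::lock_guard<std::mutex> guard(registry_mutex);
	auto it = registry.find(arg.thd_id);
	if (it == registry.end())
		return;                          /* session already gone */
	if (it->second.trx_id != arg.trx_id)
		return;                          /* trx was reused meanwhile */
	it->second.killed = true;
}

int main()
{
	{
		std::lock_guard<std::mutex> guard(registry_mutex);
		registry[7] = Session{42, false};
	}
	KillArg arg = {7, 42};                   /* ids snapshotted by the requester */
	std::thread worker(bg_kill, arg);        /* stand-in for the manager thread */
	worker.join();
	std::lock_guard<std::mutex> guard(registry_mutex);
	assert(registry[7].killed);
	return 0;
}
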
@@ -18614,7 +18740,7 @@ wsrep_innobase_kill_one_trx(
"seqno: %lld client_state: %s client_mode: %s transaction_mode: %s "
"query: %s",
wsrep_thd_is_BF(bf_thd, false) ? "BF" : "normal",
- bf_trx ? bf_trx->id : TRX_ID_MAX,
+ arg->bf_trx_id,
thd_get_thread_id(bf_thd),
wsrep_thd_trx_seqno(bf_thd),
wsrep_thd_client_state_str(bf_thd),
@@ -18639,30 +18765,86 @@ wsrep_innobase_kill_one_trx(
if (wsrep_thd_set_wsrep_aborter(bf_thd, thd))
{
WSREP_DEBUG("innodb kill transaction skipped due to wsrep_aborter set");
- wsrep_thd_UNLOCK(thd);
- DBUG_RETURN(0);
+ goto ret1;
}
- /* Note that we need to release this as it will be acquired
- below in wsrep-lib */
- wsrep_thd_UNLOCK(thd);
- DEBUG_SYNC(bf_thd, "before_wsrep_thd_abort");
+ aborting= true;
- if (wsrep_thd_bf_abort(bf_thd, thd, signal))
- {
- lock_t* wait_lock = victim_trx->lock.wait_lock;
- if (wait_lock) {
- DBUG_ASSERT(victim_trx->is_wsrep());
- WSREP_DEBUG("victim has wait flag: %lu",
- thd_get_thread_id(thd));
-
- WSREP_DEBUG("canceling wait lock");
- victim_trx->lock.was_chosen_as_deadlock_victim= TRUE;
- lock_cancel_waiting_and_release(wait_lock);
+ret1:
+ trx_mutex_exit(victim_trx);
+ lock_mutex_exit();
+ret0:
+ if (thd) {
+ wsrep_thd_UNLOCK(thd);
+ if (aborting) {
+ DEBUG_SYNC(bf_thd, "before_wsrep_thd_abort");
+ wsrep_thd_bf_abort(bf_thd, thd, arg->signal);
}
+ wsrep_thd_kill_UNLOCK(thd);
+ }
+ if (bf_thd) {
+ wsrep_thd_UNLOCK(bf_thd);
+ wsrep_thd_kill_UNLOCK(bf_thd);
}
+ free(arg);
+}
- DBUG_RETURN(0);
+/** This function is used to kill one transaction.
+
+This transaction was open on this node (not-yet-committed), and a
+conflicting writeset from some other node that was being applied
+caused a locking conflict. First committed (from other node)
+wins, thus open transaction is rolled back. BF stands for
+brute-force: any transaction can get aborted by galera any time
+it is necessary.
+
+This conflict can happen only when the replicated writeset (from
+other node) is being applied, not when it’s waiting in the queue.
+If our local transaction reached its COMMIT and this conflicting
+writeset was in the queue, then it should fail the local
+certification test instead.
+
+A brute force abort is only triggered by a locking conflict
+between a writeset being applied by an applier thread (slave thread)
+and an open transaction on the node, not by a Galera writeset
+comparison as in the local certification failure.
+
+@param[in] bf_thd Brute force (BF) thread
+@param[in,out] victim_trx Transaction to be killed
+@param[in] signal Should victim be signaled */
+void
+wsrep_innobase_kill_one_trx(
+ THD* bf_thd,
+ trx_t *victim_trx,
+ bool signal)
+{
+ ut_ad(bf_thd);
+ ut_ad(victim_trx);
+ ut_ad(lock_mutex_own());
+ ut_ad(trx_mutex_own(victim_trx));
+
+ DBUG_ENTER("wsrep_innobase_kill_one_trx");
+
+ DBUG_EXECUTE_IF("sync.before_wsrep_thd_abort",
+ {
+ const char act[]=
+ "now "
+ "SIGNAL sync.before_wsrep_thd_abort_reached "
+ "WAIT_FOR signal.before_wsrep_thd_abort";
+ DBUG_ASSERT(!debug_sync_set_action(bf_thd,
+ STRING_WITH_LEN(act)));
+ };);
+
+ trx_t* bf_trx= thd_to_trx(bf_thd);
+ bg_wsrep_kill_trx_arg *arg = (bg_wsrep_kill_trx_arg*)malloc(sizeof(*arg));
+ arg->thd_id = thd_get_thread_id(victim_trx->mysql_thd);
+ arg->trx_id = victim_trx->id;
+ arg->bf_thd_id = thd_get_thread_id(bf_thd);
+ arg->bf_trx_id = bf_trx ? bf_trx->id : TRX_ID_MAX;
+ arg->signal = signal;
+ mysql_manager_submit(bg_wsrep_kill_trx, arg);
+
+ DBUG_VOID_RETURN;
}
/** This function forces the victim transaction to abort. Aborting the
@@ -18675,14 +18857,14 @@ wsrep_innobase_kill_one_trx(
@return -1 victim thread was aborted (no transaction)
*/
static
-int
+void
wsrep_abort_transaction(
handlerton*,
THD *bf_thd,
THD *victim_thd,
my_bool signal)
{
- DBUG_ENTER("wsrep_innobase_abort_thd");
+ DBUG_ENTER("wsrep_abort_transaction");
ut_ad(bf_thd);
ut_ad(victim_thd);
@@ -18696,17 +18878,47 @@ wsrep_abort_transaction(
if (victim_trx) {
lock_mutex_enter();
trx_mutex_enter(victim_trx);
- int rcode= wsrep_innobase_kill_one_trx(bf_thd,
- victim_trx, signal);
+ victim_trx->lock.was_chosen_as_wsrep_victim= true;
trx_mutex_exit(victim_trx);
lock_mutex_exit();
+
+ wsrep_thd_kill_LOCK(victim_thd);
+ wsrep_thd_LOCK(victim_thd);
+ bool aborting= !wsrep_thd_set_wsrep_aborter(bf_thd, victim_thd);
+ wsrep_thd_UNLOCK(victim_thd);
+ if (aborting) {
+ DEBUG_SYNC(bf_thd, "before_wsrep_thd_abort");
+ DBUG_EXECUTE_IF("sync.before_wsrep_thd_abort",
+ {
+ const char act[]=
+ "now "
+ "SIGNAL sync.before_wsrep_thd_abort_reached "
+ "WAIT_FOR signal.before_wsrep_thd_abort";
+ DBUG_ASSERT(!debug_sync_set_action(bf_thd,
+ STRING_WITH_LEN(act)));
+ };);
+ wsrep_thd_bf_abort(bf_thd, victim_thd, signal);
+ }
+ wsrep_thd_kill_UNLOCK(victim_thd);
+
wsrep_srv_conc_cancel_wait(victim_trx);
- DBUG_RETURN(rcode);
+ DBUG_VOID_RETURN;
} else {
+ DBUG_EXECUTE_IF("sync.before_wsrep_thd_abort",
+ {
+ const char act[]=
+ "now "
+ "SIGNAL sync.before_wsrep_thd_abort_reached "
+ "WAIT_FOR signal.before_wsrep_thd_abort";
+ DBUG_ASSERT(!debug_sync_set_action(bf_thd,
+ STRING_WITH_LEN(act)));
+ };);
+ wsrep_thd_kill_LOCK(victim_thd);
wsrep_thd_bf_abort(bf_thd, victim_thd, signal);
+ wsrep_thd_kill_UNLOCK(victim_thd);
}
- DBUG_RETURN(-1);
+ DBUG_VOID_RETURN;
}
static
@@ -18741,6 +18953,14 @@ innobase_wsrep_get_checkpoint(
}
#endif /* WITH_WSREP */
+static void innodb_idle_flush_pct_update(THD *thd, st_mysql_sys_var *var,
+ void*, const void *save)
+{
+ innodb_idle_flush_pct = *static_cast<const ulong*>(save);
+ push_warning(thd, Sql_condition::WARN_LEVEL_WARN,
+ HA_ERR_WRONG_COMMAND, deprecated_idle_flush_pct);
+}
+
/* plugin options */
static MYSQL_SYSVAR_ENUM(checksum_algorithm, srv_checksum_algorithm,
@@ -18769,7 +18989,7 @@ static MYSQL_SYSVAR_ENUM(checksum_algorithm, srv_checksum_algorithm,
" Files updated when this option is set to crc32 or strict_crc32 will"
" not be readable by MariaDB versions older than 10.0.4;"
" new files created with full_crc32 are readable by MariaDB 10.4.3+",
- NULL, NULL, SRV_CHECKSUM_ALGORITHM_CRC32,
+ NULL, innodb_checksum_algorithm_update, SRV_CHECKSUM_ALGORITHM_CRC32,
&innodb_checksum_algorithm_typelib);
static MYSQL_SYSVAR_BOOL(log_checksums, innodb_log_checksums,
@@ -18796,12 +19016,10 @@ static MYSQL_SYSVAR_BOOL(doublewrite, srv_use_doublewrite_buf,
" Disable with --skip-innodb-doublewrite.",
NULL, NULL, TRUE);
-static MYSQL_SYSVAR_BOOL(use_atomic_writes, innobase_use_atomic_writes,
+static MYSQL_SYSVAR_BOOL(use_atomic_writes, srv_use_atomic_writes,
PLUGIN_VAR_NOCMDARG | PLUGIN_VAR_READONLY,
"Enable atomic writes, instead of using the doublewrite buffer, for files "
"on devices that supports atomic writes. "
- "To use this option one must use "
- "innodb_file_per_table=1, innodb_flush_method=O_DIRECT. "
"This option only works on Linux with either FusionIO cards using "
"the directFS filesystem or with Shannon cards using any file system.",
NULL, NULL, TRUE);
@@ -18830,12 +19048,10 @@ static MYSQL_SYSVAR_ULONG(io_capacity_max, srv_max_io_capacity,
SRV_MAX_IO_CAPACITY_DUMMY_DEFAULT, 100,
SRV_MAX_IO_CAPACITY_LIMIT, 0);
-static MYSQL_SYSVAR_ULONG(idle_flush_pct,
- srv_idle_flush_pct,
+static MYSQL_SYSVAR_ULONG(idle_flush_pct, innodb_idle_flush_pct,
PLUGIN_VAR_RQCMDARG,
- "Up to what percentage of dirty pages should be flushed when innodb "
- "finds it has spare resources to do so.",
- NULL, NULL, 100, 0, 100, 0);
+ "DEPRECATED. This setting has no effect.",
+ NULL, innodb_idle_flush_pct_update, 100, 0, 100, 0);
#ifdef UNIV_DEBUG
static MYSQL_SYSVAR_BOOL(background_drop_list_empty,
@@ -19813,11 +20029,6 @@ static MYSQL_SYSVAR_BOOL(master_thread_disabled_debug,
PLUGIN_VAR_OPCMDARG,
"Disable master thread",
NULL, srv_master_thread_disabled_debug_update, FALSE);
-
-static MYSQL_SYSVAR_UINT(simulate_comp_failures, srv_simulate_comp_failures,
- PLUGIN_VAR_NOCMDARG,
- "Simulate compression failures.",
- NULL, NULL, 0, 0, 99, 0);
#endif /* UNIV_DEBUG */
static MYSQL_SYSVAR_BOOL(force_primary_key,
@@ -20134,7 +20345,6 @@ static struct st_mysql_sys_var* innobase_system_variables[]= {
MYSQL_SYSVAR(compression_pad_pct_max),
MYSQL_SYSVAR(default_row_format),
#ifdef UNIV_DEBUG
- MYSQL_SYSVAR(simulate_comp_failures),
MYSQL_SYSVAR(trx_rseg_n_slots_debug),
MYSQL_SYSVAR(limit_optimistic_insert_debug),
MYSQL_SYSVAR(trx_purge_view_update_only_debug),
@@ -20368,7 +20578,7 @@ static bool table_name_parse(
memcpy(tbl_buf, tbl_name.m_name + dbnamelen + 1, tblnamelen);
tbl_buf[tblnamelen] = 0;
- filename_to_tablename(db_buf, dbname, MAX_DATABASE_NAME_LEN + 1, true);
+ dbnamelen = filename_to_tablename(db_buf, dbname, MAX_DATABASE_NAME_LEN + 1, true);
if (tblnamelen > TEMP_FILE_PREFIX_LENGTH
&& !strncmp(tbl_buf, TEMP_FILE_PREFIX, TEMP_FILE_PREFIX_LENGTH)) {
@@ -20380,7 +20590,7 @@ static bool table_name_parse(
tblnamelen = is_part - tbl_buf;
}
- filename_to_tablename(tbl_buf, tblname, MAX_TABLE_NAME_LEN + 1, true);
+ tblnamelen = filename_to_tablename(tbl_buf, tblname, MAX_TABLE_NAME_LEN + 1, true);
return true;
}
@@ -20812,11 +21022,11 @@ innobase_get_computed_value(
field = dtuple_get_nth_v_field(row, col->v_pos);
- my_bitmap_map* old_write_set = dbug_tmp_use_all_columns(mysql_table, mysql_table->write_set);
- my_bitmap_map* old_read_set = dbug_tmp_use_all_columns(mysql_table, mysql_table->read_set);
+ MY_BITMAP *old_write_set = dbug_tmp_use_all_columns(mysql_table, &mysql_table->write_set);
+ MY_BITMAP *old_read_set = dbug_tmp_use_all_columns(mysql_table, &mysql_table->read_set);
ret = mysql_table->update_virtual_field(mysql_table->field[col->m_col.ind]);
- dbug_tmp_restore_column_map(mysql_table->read_set, old_read_set);
- dbug_tmp_restore_column_map(mysql_table->write_set, old_write_set);
+ dbug_tmp_restore_column_map(&mysql_table->read_set, old_read_set);
+ dbug_tmp_restore_column_map(&mysql_table->write_set, old_write_set);
if (ret != 0) {
DBUG_RETURN(NULL);
@@ -21533,11 +21743,12 @@ ib_push_warning(
va_start(args, format);
buf = (char *)my_malloc(MAX_BUF_SIZE, MYF(MY_WME));
- vsprintf(buf,format, args);
-
- push_warning_printf(
- thd, Sql_condition::WARN_LEVEL_WARN,
- uint(convert_error_code_to_mysql(error, 0, thd)), buf);
+ buf[MAX_BUF_SIZE - 1] = 0;
+ vsnprintf(buf, MAX_BUF_SIZE - 1, format, args);
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
+ uint(convert_error_code_to_mysql(error, 0,
+ thd)),
+ buf);
my_free(buf);
va_end(args);
}
@@ -21565,7 +21776,8 @@ ib_push_warning(
if (thd) {
va_start(args, format);
buf = (char *)my_malloc(MAX_BUF_SIZE, MYF(MY_WME));
- vsprintf(buf,format, args);
+ buf[MAX_BUF_SIZE - 1] = 0;
+ vsnprintf(buf, MAX_BUF_SIZE - 1, format, args);
push_warning_printf(
thd, Sql_condition::WARN_LEVEL_WARN,
diff --git a/storage/innobase/handler/ha_innodb.h b/storage/innobase/handler/ha_innodb.h
index eb2afb93595..619dfca34fb 100644
--- a/storage/innobase/handler/ha_innodb.h
+++ b/storage/innobase/handler/ha_innodb.h
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 2000, 2017, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2013, 2020, MariaDB Corporation.
+Copyright (c) 2013, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -45,7 +45,7 @@ struct ha_table_option_struct
uint atomic_writes; /*!< Use atomic writes for this
table if this options is ON or
in DEFAULT if
- srv_use_atomic_writes=1.
+ innodb_use_atomic_writes.
Atomic writes are not used if
value OFF.*/
uint encryption; /*!< DEFAULT, ON, OFF */
@@ -968,6 +968,3 @@ which is in the prepared state
@return 0 or error number */
int innobase_rollback_by_xid(handlerton* hton, XID* xid);
-
-/** Free tablespace resources allocated. */
-void innobase_space_shutdown();
diff --git a/storage/innobase/handler/handler0alter.cc b/storage/innobase/handler/handler0alter.cc
index d473f9dec8f..a330cbc5460 100644
--- a/storage/innobase/handler/handler0alter.cc
+++ b/storage/innobase/handler/handler0alter.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 2005, 2019, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2013, 2020, MariaDB Corporation.
+Copyright (c) 2013, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -766,6 +766,13 @@ inline void dict_table_t::rollback_instant(
const ulint* col_map)
{
ut_d(dict_sys.assert_locked());
+
+ if (cols == old_cols) {
+ /* The ALTER failed before the instant operation happened,
+ so there is no instant operation to roll back. */
+ return;
+ }
+
dict_index_t* index = indexes.start;
mtr_t mtr;
mtr.start();
@@ -1060,13 +1067,6 @@ struct ha_innobase_inplace_ctx : public inplace_alter_handler_ctx
@return whether the table will be rebuilt */
bool need_rebuild () const { return(old_table != new_table); }
- /** Clear uncommmitted added indexes after a failed operation. */
- void clear_added_indexes()
- {
- for (ulint i= 0; i < num_to_add_index; i++)
- add_index[i]->detach_columns(true);
- }
-
/** Convert table-rebuilding ALTER to instant ALTER. */
void prepare_instant()
{
@@ -1164,6 +1164,42 @@ struct ha_innobase_inplace_ctx : public inplace_alter_handler_ctx
}
}
+ /** @return whether the given column is being added */
+ bool is_new_vcol(const dict_v_col_t &v_col) const
+ {
+ for (ulint i= 0; i < num_to_add_vcol; i++)
+ if (&add_vcol[i] == &v_col)
+ return true;
+ return false;
+ }
+
+ /** During rollback, make newly added indexes point to
+ newly added virtual columns. */
+ void clean_new_vcol_index()
+ {
+ ut_ad(old_table == new_table);
+ const dict_index_t *index= dict_table_get_first_index(old_table);
+ while ((index= dict_table_get_next_index(index)) != NULL)
+ {
+ if (!index->has_virtual() || index->is_committed())
+ continue;
+ ulint n_drop_new_vcol= index->get_new_n_vcol();
+ for (ulint i= 0; n_drop_new_vcol && i < index->n_fields; i++)
+ {
+ dict_col_t *col= index->fields[i].col;
+ /* Skip the non-virtual and old virtual columns */
+ if (!col->is_virtual())
+ continue;
+ dict_v_col_t *vcol= reinterpret_cast<dict_v_col_t*>(col);
+ if (!is_new_vcol(*vcol))
+ continue;
+
+ index->fields[i].col= &index->new_vcol_info->
+ add_drop_v_col(index->heap, vcol, --n_drop_new_vcol)->m_col;
+ }
+ }
+ }
+
private:
// Disable copying
ha_innobase_inplace_ctx(const ha_innobase_inplace_ctx&);
@@ -3433,9 +3469,9 @@ innobase_row_to_mysql(
}
}
if (table->vfield) {
- my_bitmap_map* old_read_set = tmp_use_all_columns(table, table->read_set);
+ MY_BITMAP* old_read_set = tmp_use_all_columns(table, &table->read_set);
table->update_virtual_fields(table->file, VCOL_UPDATE_FOR_READ);
- tmp_restore_column_map(table->read_set, old_read_set);
+ tmp_restore_column_map(&table->read_set, old_read_set);
}
}
@@ -3791,9 +3827,11 @@ innobase_fts_check_doc_id_index(
for (index = dict_table_get_first_index(table);
index; index = dict_table_get_next_index(index)) {
+
/* Check if there exists a unique index with the name of
- FTS_DOC_ID_INDEX_NAME */
- if (innobase_strcasecmp(index->name, FTS_DOC_ID_INDEX_NAME)) {
+ FTS_DOC_ID_INDEX_NAME and ignore the corrupted index */
+ if (index->type & DICT_CORRUPT
+ || innobase_strcasecmp(index->name, FTS_DOC_ID_INDEX_NAME)) {
continue;
}
@@ -4101,7 +4139,7 @@ online_retry_drop_indexes_low(
ut_ad(table->get_ref_count() >= 1);
if (table->drop_aborted) {
- row_merge_drop_indexes(trx, table, TRUE);
+ row_merge_drop_indexes(trx, table, true);
}
}
@@ -5907,11 +5945,13 @@ add_all_virtual:
const rec_t* rec = btr_pcur_get_rec(&pcur);
que_thr_t* thr = pars_complete_graph_for_exec(
NULL, trx, ctx->heap, NULL);
+ const bool is_root = block->page.id.page_no() == index->page;
dberr_t err = DB_SUCCESS;
if (rec_is_metadata(rec, *index)) {
ut_ad(page_rec_is_user_rec(rec));
- if (!rec_is_alter_metadata(rec, *index)
+ if (is_root
+ && !rec_is_alter_metadata(rec, *index)
&& !index->table->instant
&& !page_has_next(block->frame)
&& page_rec_is_last(rec, block->frame)) {
@@ -5978,7 +6018,7 @@ add_all_virtual:
offsets = rec_get_offsets(
btr_pcur_get_rec(&pcur), index, offsets,
- true, ULINT_UNDEFINED, &offsets_heap);
+ index->n_core_fields, ULINT_UNDEFINED, &offsets_heap);
if (big_rec) {
if (err == DB_SUCCESS) {
err = btr_store_big_rec_extern_fields(
@@ -5993,7 +6033,8 @@ add_all_virtual:
}
btr_pcur_close(&pcur);
goto func_exit;
- } else if (page_rec_is_supremum(rec) && !index->table->instant) {
+ } else if (is_root && page_rec_is_supremum(rec)
+ && !index->table->instant) {
empty_table:
/* The table is empty. */
ut_ad(fil_page_index_page_check(block->frame));
@@ -6523,6 +6564,7 @@ new_clustered_failed:
}
if (dict_col_name_is_reserved(field->field_name.str)) {
+wrong_column_name:
dict_mem_table_free(ctx->new_table);
ctx->new_table = ctx->old_table;
my_error(ER_WRONG_COLUMN_NAME, MYF(0),
@@ -6530,6 +6572,21 @@ new_clustered_failed:
goto new_clustered_failed;
}
+ /** Note that the FTS_DOC_ID name is case sensitive due
+ to the internal query parser. The FTS_DOC_ID column must
+ be of BIGINT NOT NULL type and its name must be spelled
+ in all uppercase characters */
+ if (!innobase_strcasecmp(field->field_name.str,
+ FTS_DOC_ID_COL_NAME)) {
+ if (col_type != DATA_INT
+ || field->real_maybe_null()
+ || col_len != sizeof(doc_id_t)
+ || strcmp(field->field_name.str,
+ FTS_DOC_ID_COL_NAME)) {
+ goto wrong_column_name;
+ }
+ }
+
if (is_virtual) {
dict_mem_table_add_v_col(
ctx->new_table, ctx->heap,
@@ -6878,7 +6935,7 @@ new_table_failed:
for (ulint a = 0; a < ctx->num_to_add_index; a++) {
dict_index_t* index = ctx->add_index[a];
- const bool has_new_v_col = index->has_new_v_col;
+ const ulint n_v_col = index->get_new_n_vcol();
index = create_index_dict(ctx->trx, index, add_v);
error = ctx->trx->error_state;
if (error != DB_SUCCESS) {
@@ -6908,7 +6965,9 @@ error_handling_drop_uncached_1:
goto error_handling_drop_uncached_1;
}
index->parser = index_defs[a].parser;
- index->has_new_v_col = has_new_v_col;
+ if (n_v_col) {
+ index->assign_new_v_col(n_v_col);
+ }
/* Note the id of the transaction that created this
index, we use it to restrict readers from accessing
this index, to ensure read consistency. */
@@ -6978,7 +7037,7 @@ error_handling_drop_uncached_1:
for (ulint a = 0; a < ctx->num_to_add_index; a++) {
dict_index_t* index = ctx->add_index[a];
- const bool has_new_v_col = index->has_new_v_col;
+ const ulint n_v_col = index->get_new_n_vcol();
DBUG_EXECUTE_IF(
"create_index_metadata_fail",
if (a + 1 == ctx->num_to_add_index) {
@@ -7010,7 +7069,9 @@ error_handling_drop_uncached:
}
index->parser = index_defs[a].parser;
- index->has_new_v_col = has_new_v_col;
+ if (n_v_col) {
+ index->assign_new_v_col(n_v_col);
+ }
/* Note the id of the transaction that created this
index, we use it to restrict readers from accessing
this index, to ensure read consistency. */
@@ -7235,7 +7296,7 @@ error_handled:
online_retry_drop_indexes_with_trx(user_table, ctx->trx);
} else {
ut_ad(!ctx->need_rebuild());
- row_merge_drop_indexes(ctx->trx, user_table, TRUE);
+ row_merge_drop_indexes(ctx->trx, user_table, true);
trx_commit_for_mysql(ctx->trx);
}
@@ -8598,7 +8659,6 @@ oom:
that we hold at most a shared lock on the table. */
m_prebuilt->trx->error_info = NULL;
ctx->trx->error_state = DB_SUCCESS;
- ctx->clear_added_indexes();
DBUG_RETURN(true);
}
@@ -8690,17 +8750,18 @@ temparary index prefix
@param table the TABLE
@param locked TRUE=table locked, FALSE=may need to do a lazy drop
@param trx the transaction
-*/
-static MY_ATTRIBUTE((nonnull))
+@param alter_trx transaction which takes S-lock on the table
+ while creating the index */
+static
void
innobase_rollback_sec_index(
-/*========================*/
- dict_table_t* user_table,
- const TABLE* table,
- ibool locked,
- trx_t* trx)
+ dict_table_t* user_table,
+ const TABLE* table,
+ bool locked,
+ trx_t* trx,
+ const trx_t* alter_trx=NULL)
{
- row_merge_drop_indexes(trx, user_table, locked);
+ row_merge_drop_indexes(trx, user_table, locked, alter_trx);
/* Free the table->fts only if there is no FTS_DOC_ID
in the table */
@@ -8795,7 +8856,12 @@ rollback_inplace_alter_table(
DBUG_ASSERT(ctx->new_table == prebuilt->table);
innobase_rollback_sec_index(
- prebuilt->table, table, FALSE, ctx->trx);
+ prebuilt->table, table,
+ (ha_alter_info->alter_info->requested_lock
+ == Alter_info::ALTER_TABLE_LOCK_EXCLUSIVE),
+ ctx->trx, prebuilt->trx);
+
+ ctx->clean_new_vcol_index();
}
trx_commit_for_mysql(ctx->trx);
@@ -8937,6 +9003,7 @@ innobase_rename_column_try(
const char* to)
{
dberr_t error;
+ bool clust_has_prefixes = false;
DBUG_ENTER("innobase_rename_column_try");
@@ -8996,6 +9063,39 @@ innobase_rename_column_try(
if (error != DB_SUCCESS) {
goto err_exit;
}
+
+ if (!has_prefixes || !clust_has_prefixes
+ || f.prefix_len) {
+ continue;
+ }
+
+ /* For secondary indexes, the
+ has_prefixes check can be 'polluted'
+ by PRIMARY KEY column prefix. Try also
+ the simpler encoding of SYS_FIELDS.POS. */
+ info = pars_info_create();
+
+ pars_info_add_ull_literal(info, "indexid", index->id);
+ pars_info_add_int4_literal(info, "nth", i);
+ pars_info_add_str_literal(info, "new", to);
+
+ error = que_eval_sql(
+ info,
+ "PROCEDURE RENAME_SYS_FIELDS_PROC () IS\n"
+ "BEGIN\n"
+ "UPDATE SYS_FIELDS SET COL_NAME=:new\n"
+ "WHERE INDEX_ID=:indexid\n"
+ "AND POS=:nth;\n"
+ "END;\n",
+ FALSE, trx);
+
+ if (error != DB_SUCCESS) {
+ goto err_exit;
+ }
+ }
+
+ if (index == dict_table_get_first_index(ctx.old_table)) {
+ clust_has_prefixes = has_prefixes;
}
}
@@ -10150,6 +10250,44 @@ innobase_page_compression_try(
DBUG_RETURN(false);
}
+static
+void
+dict_stats_try_drop_table(THD *thd, const table_name_t &name,
+ const LEX_CSTRING &table_name)
+{
+ char errstr[1024];
+ if (dict_stats_drop_table(name.m_name, errstr, sizeof(errstr)) != DB_SUCCESS)
+ {
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_ALTER_INFO,
+ "Deleting persistent statistics"
+ " for table '%s' in InnoDB failed: %s",
+ table_name.str,
+ errstr);
+ }
+}
+
+/** Evict the table from cache and reopen it. Drop outdated statistics.
+  @param thd         MariaDB THD entity
+  @param table       InnoDB table
+  @param table_name  user-friendly table name for error messages
+  @return newly opened table */
+static
+dict_table_t*
+innobase_reload_table(THD *thd, dict_table_t *table,
+ const LEX_CSTRING &table_name)
+{
+ char *tb_name= strdup(table->name.m_name);
+ dict_table_close(table, true, false);
+ dict_sys.remove(table);
+ table= dict_table_open_on_name(tb_name, TRUE, TRUE,
+ DICT_ERR_IGNORE_FK_NOKEY);
+
+ /* Drop outdated table stats. */
+ dict_stats_try_drop_table(thd, table->name, table_name);
+ free(tb_name);
+ return table;
+}
+
/** Commit the changes made during prepare_inplace_alter_table()
and inplace_alter_table() inside the data dictionary tables,
when not rebuilding the table.
@@ -11358,44 +11496,25 @@ foreign_fail:
Currently dict_load_column_low() is the only place where
num_base for virtual columns is assigned to nonzero. */
if (ctx0->num_to_drop_vcol || ctx0->num_to_add_vcol
+ || (ctx0->new_table->n_v_cols && !new_clustered
+ && (ha_alter_info->alter_info->drop_list.elements
+ || ha_alter_info->alter_info->create_list.elements))
|| (ctx0->is_instant()
&& m_prebuilt->table->n_v_cols
&& ha_alter_info->handler_flags & ALTER_STORED_COLUMN_ORDER)) {
- /* FIXME: this workaround does not seem to work with
- partitioned tables */
DBUG_ASSERT(ctx0->old_table->get_ref_count() == 1);
-
trx_commit_for_mysql(m_prebuilt->trx);
- char tb_name[NAME_LEN * 2 + 1 + 1];
- strcpy(tb_name, m_prebuilt->table->name.m_name);
- dict_table_close(m_prebuilt->table, true, false);
if (ctx0->is_instant()) {
for (unsigned i = ctx0->old_n_v_cols; i--; ) {
ctx0->old_v_cols[i].~dict_v_col_t();
}
const_cast<unsigned&>(ctx0->old_n_v_cols) = 0;
}
- dict_sys.remove(m_prebuilt->table);
- m_prebuilt->table = dict_table_open_on_name(
- tb_name, TRUE, TRUE, DICT_ERR_IGNORE_FK_NOKEY);
- /* Drop outdated table stats. */
- char errstr[1024];
- if (dict_stats_drop_table(
- m_prebuilt->table->name.m_name,
- errstr, sizeof(errstr))
- != DB_SUCCESS) {
- push_warning_printf(
- m_user_thd,
- Sql_condition::WARN_LEVEL_WARN,
- ER_ALTER_INFO,
- "Deleting persistent statistics"
- " for table '%s' in"
- " InnoDB failed: %s",
- table->s->table_name.str,
- errstr);
- }
+ m_prebuilt->table = innobase_reload_table(m_user_thd,
+ m_prebuilt->table,
+ table->s->table_name);
row_mysql_unlock_data_dictionary(trx);
trx->free();
@@ -11455,25 +11574,12 @@ foreign_fail:
old copy of the table (which was renamed to
ctx->tmp_name). */
- char errstr[1024];
-
DBUG_ASSERT(0 == strcmp(ctx->old_table->name.m_name,
ctx->tmp_name));
- if (dict_stats_drop_table(
- ctx->new_table->name.m_name,
- errstr, sizeof(errstr))
- != DB_SUCCESS) {
- push_warning_printf(
- m_user_thd,
- Sql_condition::WARN_LEVEL_WARN,
- ER_ALTER_INFO,
- "Deleting persistent statistics"
- " for rebuilt table '%s' in"
- " InnoDB failed: %s",
- table->s->table_name.str,
- errstr);
- }
+ dict_stats_try_drop_table(m_user_thd,
+ ctx->new_table->name,
+ table->s->table_name);
DBUG_EXECUTE_IF("ib_ddl_crash_before_commit",
DBUG_SUICIDE(););
diff --git a/storage/innobase/ibuf/ibuf0ibuf.cc b/storage/innobase/ibuf/ibuf0ibuf.cc
index 469836f0955..fccd87ab416 100644
--- a/storage/innobase/ibuf/ibuf0ibuf.cc
+++ b/storage/innobase/ibuf/ibuf0ibuf.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1997, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2016, 2020, MariaDB Corporation.
+Copyright (c) 2016, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -3870,7 +3870,7 @@ dump:
row_ins_sec_index_entry_by_modify(BTR_MODIFY_LEAF). */
ut_ad(rec_get_deleted_flag(rec, page_is_comp(page)));
- offsets = rec_get_offsets(rec, index, NULL, true,
+ offsets = rec_get_offsets(rec, index, NULL, index->n_fields,
ULINT_UNDEFINED, &heap);
update = row_upd_build_sec_rec_difference_binary(
rec, index, offsets, entry, heap);
@@ -4043,7 +4043,8 @@ ibuf_delete(
ut_ad(ibuf_inside(mtr));
ut_ad(dtuple_check_typed(entry));
- ut_ad(!dict_index_is_spatial(index));
+ ut_ad(!index->is_spatial());
+ ut_ad(!index->is_clust());
low_match = page_cur_search(block, index, entry, &page_cur);
@@ -4062,8 +4063,8 @@ ibuf_delete(
rec_offs_init(offsets_);
- offsets = rec_get_offsets(
- rec, index, offsets, true, ULINT_UNDEFINED, &heap);
+ offsets = rec_get_offsets(rec, index, offsets, index->n_fields,
+ ULINT_UNDEFINED, &heap);
if (page_get_n_recs(page) <= 1
|| !(REC_INFO_DELETED_FLAG
@@ -4858,6 +4859,13 @@ dberr_t ibuf_check_bitmap_on_import(const trx_t* trx, fil_space_t* space)
bitmap_page = ibuf_bitmap_get_map_page(
page_id_t(space->id, page_no), zip_size, &mtr);
+ if (!bitmap_page) {
+ mutex_exit(&ibuf_mutex);
+ ibuf_exit(&mtr);
+ mtr_commit(&mtr);
+ return DB_CORRUPTION;
+ }
+
if (buf_is_zeroes(span<const byte>(bitmap_page,
physical_size))) {
/* This means we got all-zero page instead of
@@ -4881,11 +4889,6 @@ dberr_t ibuf_check_bitmap_on_import(const trx_t* trx, fil_space_t* space)
continue;
}
- if (!bitmap_page) {
- mutex_exit(&ibuf_mutex);
- return DB_CORRUPTION;
- }
-
for (i = FSP_IBUF_BITMAP_OFFSET + 1; i < physical_size; i++) {
const ulint offset = page_no + i;
const page_id_t cur_page_id(space->id, offset);
diff --git a/storage/innobase/include/btr0bulk.h b/storage/innobase/include/btr0bulk.h
index 4c5294f9b5f..b8428186383 100644
--- a/storage/innobase/include/btr0bulk.h
+++ b/storage/innobase/include/btr0bulk.h
@@ -326,6 +326,8 @@ public:
/** Re-latch all latches */
void latch();
+ table_name_t table_name() { return m_index->table->name; }
+
private:
/** Insert a tuple to a page in a level
@param[in] tuple tuple to insert
diff --git a/storage/innobase/include/btr0pcur.h b/storage/innobase/include/btr0pcur.h
index 38960b1d15c..b0b61a4d1ff 100644
--- a/storage/innobase/include/btr0pcur.h
+++ b/storage/innobase/include/btr0pcur.h
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, 2020, MariaDB Corporation.
+Copyright (c) 2017, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -497,8 +497,10 @@ struct btr_pcur_t{
/** if cursor position is stored, contains an initial segment of the
latest record cursor was positioned either on, before or after */
rec_t* old_rec;
+ /** btr_cur.index->n_core_fields when old_rec was copied */
+ uint16 old_n_core_fields;
/** number of fields in old_rec */
- ulint old_n_fields;
+ uint16 old_n_fields;
/** BTR_PCUR_ON, BTR_PCUR_BEFORE, or BTR_PCUR_AFTER, depending on
whether cursor was on, before, or after the old_rec record */
enum btr_pcur_pos_t rel_pos;
diff --git a/storage/innobase/include/data0data.h b/storage/innobase/include/data0data.h
index 04ddf5b0a42..fc774b6ee60 100644
--- a/storage/innobase/include/data0data.h
+++ b/storage/innobase/include/data0data.h
@@ -544,6 +544,17 @@ struct dtuple_t {
@param[in] index index possibly with instantly added columns */
void trim(const dict_index_t& index);
+ bool vers_history_row() const
+ {
+ for (ulint i = 0; i < n_fields; i++) {
+ const dfield_t* field = &fields[i];
+ if (field->type.vers_sys_end()) {
+ return field->vers_history_row();
+ }
+ }
+ return false;
+ }
+
/**
@param info_bits the info_bits of a data tuple
@return whether this is a hidden metadata record
diff --git a/storage/innobase/include/dict0dict.h b/storage/innobase/include/dict0dict.h
index a3f4baa4c31..0f730ffbcb7 100644
--- a/storage/innobase/include/dict0dict.h
+++ b/storage/innobase/include/dict0dict.h
@@ -2,7 +2,7 @@
Copyright (c) 1996, 2018, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2012, Facebook Inc.
-Copyright (c) 2013, 2020, MariaDB Corporation.
+Copyright (c) 2013, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
diff --git a/storage/innobase/include/dict0mem.h b/storage/innobase/include/dict0mem.h
index c6a506472df..dc85c85474c 100644
--- a/storage/innobase/include/dict0mem.h
+++ b/storage/innobase/include/dict0mem.h
@@ -2,7 +2,7 @@
Copyright (c) 1996, 2017, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2012, Facebook Inc.
-Copyright (c) 2013, 2020, MariaDB Corporation.
+Copyright (c) 2013, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -795,6 +795,35 @@ struct dict_v_col_t{
}
};
+/** Data structure for a newly added virtual column in an index.
+It is used only during rollback_inplace_alter_table() of the
+addition of an index that depends on newly added virtual columns,
+and it uses the index heap. It must be freed when the index is
+removed from the cache. */
+struct dict_add_v_col_info
+{
+ ulint n_v_col;
+ dict_v_col_t *v_col;
+
+ /** Duplicate a newly added virtual column while rolling back
+ the index that contains new virtual columns
+ @param heap   memory heap in which to store the duplicate
+ @param col    virtual column to be duplicated
+ @param offset offset at which to duplicate the virtual column */
+ dict_v_col_t* add_drop_v_col(mem_heap_t *heap, dict_v_col_t *col,
+ ulint offset)
+ {
+ ut_ad(n_v_col);
+ ut_ad(offset < n_v_col);
+ if (!v_col)
+ v_col= static_cast<dict_v_col_t*>
+ (mem_heap_alloc(heap, n_v_col * sizeof *v_col));
+ new (&v_col[offset]) dict_v_col_t();
+ v_col[offset].m_col= col->m_col;
+ v_col[offset].v_pos= col->v_pos;
+ return &v_col[offset];
+ }
+};
+
/** Data structure for newly added virtual column in a table */
struct dict_add_v_col_t{
/** number of new virtual column */
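
The add_drop_v_col() member above allocates its backing array lazily from the index heap and constructs one slot at a time with placement new. A simplified standalone illustration of that idiom, with plain malloc standing in for mem_heap_t (all names below are hypothetical):

#include <cassert>
#include <cstdlib>
#include <new>

struct vcol { unsigned pos; };

struct vcol_backup
{
	size_t n_slots= 0;
	vcol *slots= nullptr;

	vcol *store(const vcol &src, size_t offset)
	{
		assert(offset < n_slots);
		if (!slots) /* allocate the whole array on first use */
			slots= static_cast<vcol*>(std::malloc(n_slots * sizeof *slots));
		return new (&slots[offset]) vcol(src); /* construct only this slot */
	}
};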
@@ -1039,9 +1068,13 @@ struct dict_index_t {
dict_field_t* fields; /*!< array of field descriptions */
st_mysql_ftparser*
parser; /*!< fulltext parser plugin */
- bool has_new_v_col;
- /*!< whether it has a newly added virtual
- column in ALTER */
+
+ /** Non-NULL if virtual columns were newly added during ALTER;
+ it stores those columns in case the ALTER fails. It is allocated
+ from the dict_index_t heap and must be freed when the index is
+ removed from the table. */
+ dict_add_v_col_info* new_vcol_info;
+
bool index_fts_syncing;/*!< Whether the fts index is
still syncing in the background;
FIXME: remove this and use MDL */
@@ -1198,9 +1231,8 @@ public:
/** @return whether the index is corrupted */
inline bool is_corrupted() const;
- /** Detach the virtual columns from the index that is to be removed.
- @param whether to reset fields[].col */
- void detach_columns(bool clear= false)
+ /** Detach the virtual columns from the index that is to be removed. */
+ void detach_columns()
{
if (!has_virtual() || !cached)
return;
@@ -1210,8 +1242,6 @@ public:
if (!col || !col->is_virtual())
continue;
col->detach(*this);
- if (clear)
- fields[i].col= nullptr;
}
}
@@ -1274,6 +1304,23 @@ public:
bool
vers_history_row(const rec_t* rec, bool &history_row);
+ /** Assign the number of new virtual columns to be added
+ as part of the index
+ @param n_vcol number of virtual columns to be added */
+ void assign_new_v_col(ulint n_vcol)
+ {
+ new_vcol_info= static_cast<dict_add_v_col_info*>
+ (mem_heap_zalloc(heap, sizeof *new_vcol_info));
+ new_vcol_info->n_v_col= n_vcol;
+ }
+
+ /* @return whether the index has newly added virtual columns */
+ bool has_new_v_col() const { return new_vcol_info; }
+
+ /* @return number of newly added virtual columns */
+ ulint get_new_n_vcol() const
+ { return new_vcol_info ? new_vcol_info->n_v_col : 0; }
+
/** Reconstruct the clustered index fields. */
inline void reconstruct_fields();
@@ -2286,6 +2333,17 @@ public:
/** mysql_row_templ_t for base columns used for compute the virtual
columns */
dict_vcol_templ_t* vc_templ;
+
+ /* @return whether the table is locked by any transaction
+ other than the given one */
+ bool has_lock_other_than(const trx_t *trx) const
+ {
+ for (lock_t *lock= UT_LIST_GET_FIRST(locks); lock;
+ lock= UT_LIST_GET_NEXT(un_member.tab_lock.locks, lock))
+ if (lock->trx != trx)
+ return true;
+ return false;
+ }
};
inline void dict_index_t::set_modified(mtr_t& mtr) const
diff --git a/storage/innobase/include/fil0fil.h b/storage/innobase/include/fil0fil.h
index 3001817a78c..873fcd67a3a 100644
--- a/storage/innobase/include/fil0fil.h
+++ b/storage/innobase/include/fil0fil.h
@@ -637,7 +637,7 @@ struct fil_node_t {
/** Determine some file metadata when creating or reading the file.
@param file the file that is being created, or OS_FILE_CLOSED */
void find_metadata(os_file_t file = OS_FILE_CLOSED
-#ifdef UNIV_LINUX
+#ifndef _WIN32
, struct stat* statbuf = NULL
#endif
);
diff --git a/storage/innobase/include/gis0rtree.ic b/storage/innobase/include/gis0rtree.ic
index 2076b24b9b1..c829f0de255 100644
--- a/storage/innobase/include/gis0rtree.ic
+++ b/storage/innobase/include/gis0rtree.ic
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 2014, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, 2020, MariaDB Corporation.
+Copyright (c) 2017, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -57,7 +57,8 @@ rtr_page_cal_mbr(
page = buf_block_get_frame(block);
rec = page_rec_get_next(page_get_infimum_rec(page));
- offsets = rec_get_offsets(rec, index, offsets, page_is_leaf(page),
+ offsets = rec_get_offsets(rec, index, offsets, page_is_leaf(page)
+ ? index->n_fields : 0,
ULINT_UNDEFINED, &heap);
do {
diff --git a/storage/innobase/include/ha_prototypes.h b/storage/innobase/include/ha_prototypes.h
index 28e5d1d4f56..108f6925ef7 100644
--- a/storage/innobase/include/ha_prototypes.h
+++ b/storage/innobase/include/ha_prototypes.h
@@ -231,7 +231,7 @@ innobase_casedn_str(
#ifdef WITH_WSREP
UNIV_INTERN
-int
+void
wsrep_innobase_kill_one_trx(
THD* bf_thd,
trx_t *victim_trx,
diff --git a/storage/innobase/include/os0file.h b/storage/innobase/include/os0file.h
index dbc53d0b786..fd12b0e3c9e 100644
--- a/storage/innobase/include/os0file.h
+++ b/storage/innobase/include/os0file.h
@@ -2,7 +2,7 @@
Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2009, Percona Inc.
-Copyright (c) 2013, 2020, MariaDB Corporation.
+Copyright (c) 2013, 2021, MariaDB Corporation.
Portions of this file contain modifications contributed and copyrighted
by Percona Inc.. Those modifications are
@@ -152,7 +152,6 @@ static const ulint OS_FILE_NORMAL = 62;
/** Types for file create @{ */
static const ulint OS_DATA_FILE = 100;
static const ulint OS_LOG_FILE = 101;
-static const ulint OS_DATA_TEMP_FILE = 102;
static const ulint OS_DATA_FILE_NO_O_DIRECT = 103;
/* @} */
diff --git a/storage/innobase/include/page0cur.ic b/storage/innobase/include/page0cur.ic
index f0844ee1f73..e53f6d8f463 100644
--- a/storage/innobase/include/page0cur.ic
+++ b/storage/innobase/include/page0cur.ic
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1994, 2014, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2015, 2018, MariaDB Corporation.
+Copyright (c) 2015, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -273,7 +273,8 @@ page_cur_tuple_insert(
index, tuple, n_ext);
*offsets = rec_get_offsets(rec, index, *offsets,
- page_is_leaf(cursor->block->frame),
+ page_is_leaf(cursor->block->frame)
+ ? index->n_core_fields : 0,
ULINT_UNDEFINED, heap);
ut_ad(size == rec_offs_size(*offsets));
diff --git a/storage/innobase/include/page0page.ic b/storage/innobase/include/page0page.ic
index c0a3c86c737..b6584177fe4 100644
--- a/storage/innobase/include/page0page.ic
+++ b/storage/innobase/include/page0page.ic
@@ -1093,7 +1093,7 @@ page_get_instant(const page_t* page)
break;
}
#endif /* UNIV_DEBUG */
- return(i >> 3);
+ return static_cast<uint16_t>(i >> 3); /* i / 8 */
}
#endif /* !UNIV_INNOCHECKSUM */
diff --git a/storage/innobase/include/que0que.h b/storage/innobase/include/que0que.h
index c8e1f92e670..f018f73527d 100644
--- a/storage/innobase/include/que0que.h
+++ b/storage/innobase/include/que0que.h
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, 2020, MariaDB Corporation.
+Copyright (c) 2017, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -303,7 +303,6 @@ que_fork_scheduler_round_robin(
/** Query thread states */
enum que_thr_state_t {
QUE_THR_RUNNING,
- QUE_THR_PROCEDURE_WAIT,
/** in selects this means that the thread is at the end of its
result set (or start, in case of a scroll cursor); in other
statements, this means the thread has done its task */
diff --git a/storage/innobase/include/rem0rec.h b/storage/innobase/include/rem0rec.h
index 6f6535c529f..34e7c5f1b0f 100644
--- a/storage/innobase/include/rem0rec.h
+++ b/storage/innobase/include/rem0rec.h
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, 2020, MariaDB Corporation.
+Copyright (c) 2017, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -543,7 +543,7 @@ rec_get_n_extern_new(
@param[in] index the index that the record belongs to
@param[in,out] offsets array comprising offsets[0] allocated elements,
or an array from rec_get_offsets(), or NULL
-@param[in] leaf whether this is a leaf-page record
+@param[in] n_core 0, or index->n_core_fields for leaf page
@param[in] n_fields maximum number of offsets to compute
(ULINT_UNDEFINED to compute all offsets)
@param[in,out] heap memory heap
@@ -553,7 +553,7 @@ rec_get_offsets_func(
const rec_t* rec,
const dict_index_t* index,
rec_offs* offsets,
- bool leaf,
+ ulint n_core,
ulint n_fields,
#ifdef UNIV_DEBUG
const char* file, /*!< in: file name where called */
@@ -1034,12 +1034,14 @@ rec_copy(
const rec_offs* offsets);
/** Determine the size of a data tuple prefix in a temporary file.
+@tparam redundant_temp whether to use the ROW_FORMAT=REDUNDANT format
@param[in] index clustered or secondary index
@param[in] fields data fields
@param[in] n_fields number of data fields
@param[out] extra record header size
@param[in] status REC_STATUS_ORDINARY or REC_STATUS_INSTANT
@return total size, in bytes */
+template<bool redundant_temp>
ulint
rec_get_converted_size_temp(
const dict_index_t* index,
@@ -1078,11 +1080,13 @@ rec_init_offsets_temp(
MY_ATTRIBUTE((nonnull));
/** Convert a data tuple prefix to the temporary file format.
+@tparam redundant_temp whether to use the ROW_FORMAT=REDUNDANT format
@param[out] rec record in temporary file format
@param[in] index clustered or secondary index
@param[in] fields data fields
@param[in] n_fields number of data fields
@param[in] status REC_STATUS_ORDINARY or REC_STATUS_INSTANT */
+template<bool redundant_temp>
void
rec_convert_dtuple_to_temp(
rec_t* rec,
@@ -1175,7 +1179,9 @@ rec_get_converted_size(
The fields are copied into the memory heap.
@param[out] tuple data tuple
@param[in] rec index record, or a copy thereof
-@param[in] is_leaf whether rec is a leaf page record
+@param[in] index index of rec
+@param[in] n_core index->n_core_fields at the time rec was
+ copied, or 0 if non-leaf page record
@param[in] n_fields number of fields to copy
@param[in,out] heap memory heap */
void
@@ -1183,7 +1189,7 @@ rec_copy_prefix_to_dtuple(
dtuple_t* tuple,
const rec_t* rec,
const dict_index_t* index,
- bool is_leaf,
+ ulint n_core,
ulint n_fields,
mem_heap_t* heap)
MY_ATTRIBUTE((nonnull));
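
The rem0rec.h hunks above change rec_get_offsets() and related helpers from a bool 'leaf' flag to an n_core argument. The call sites elsewhere in this patch consistently pass index->n_core_fields for leaf-page records and 0 otherwise; a hypothetical wrapper restating that convention (not part of the patch):

static rec_offs *get_offsets_for_page(const rec_t *rec,
				      const dict_index_t *index,
				      rec_offs *offsets,
				      const page_t *page,
				      mem_heap_t **heap)
{
	/* 0 means a node-pointer (non-leaf) record; otherwise pass the
	number of core fields of the index. */
	const ulint n_core = page_is_leaf(page) ? index->n_core_fields : 0;
	return rec_get_offsets(rec, index, offsets, n_core,
			       ULINT_UNDEFINED, heap);
}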
diff --git a/storage/innobase/include/row0ins.h b/storage/innobase/include/row0ins.h
index 34427dc6dc7..9a16394a052 100644
--- a/storage/innobase/include/row0ins.h
+++ b/storage/innobase/include/row0ins.h
@@ -206,6 +206,7 @@ struct ins_node_t
if this is NULL, entry list should be created
and buffers for sys fields in row allocated */
void vers_update_end(row_prebuilt_t *prebuilt, bool history_row);
+ bool vers_history_row() const; /* true if 'row' is historical */
};
/** Create an insert object.
diff --git a/storage/innobase/include/row0log.h b/storage/innobase/include/row0log.h
index 5ec4b9c1103..93aa5c24230 100644
--- a/storage/innobase/include/row0log.h
+++ b/storage/innobase/include/row0log.h
@@ -247,6 +247,11 @@ row_log_apply(
ut_stage_alter_t* stage)
MY_ATTRIBUTE((warn_unused_result));
+/** Get the n_core_fields of the online log for the index
+@param index index whose online log n_core_fields is accessed
+@return the n_core_fields value recorded in the online log */
+unsigned row_log_get_n_core_fields(const dict_index_t *index);
+
#ifdef HAVE_PSI_STAGE_INTERFACE
/** Estimate how much work is to be done by the log apply phase
of an ALTER TABLE for this index.
diff --git a/storage/innobase/include/row0merge.h b/storage/innobase/include/row0merge.h
index e88380b94e3..3252af0062b 100644
--- a/storage/innobase/include/row0merge.h
+++ b/storage/innobase/include/row0merge.h
@@ -167,18 +167,20 @@ row_merge_drop_indexes_dict(
table_id_t table_id)/*!< in: table identifier */
MY_ATTRIBUTE((nonnull));
-/*********************************************************************//**
-Drop those indexes which were created before an error occurred.
+/** Drop indexes that were created before an error occurred.
The data dictionary must have been locked exclusively by the caller,
-because the transaction will not be committed. */
+because the transaction will not be committed.
+@param trx dictionary transaction
+@param table table containing the indexes
+@param locked true if the table is locked;
+              false if a lazy drop may be needed
+@param alter_trx Alter table transaction */
void
row_merge_drop_indexes(
-/*===================*/
- trx_t* trx, /*!< in/out: transaction */
- dict_table_t* table, /*!< in/out: table containing the indexes */
- ibool locked) /*!< in: TRUE=table locked,
- FALSE=may need to do a lazy drop */
- MY_ATTRIBUTE((nonnull));
+ trx_t* trx,
+ dict_table_t* table,
+ bool locked,
+ const trx_t* alter_trx=NULL);
/*********************************************************************//**
Drop all partially created indexes during crash recovery. */
diff --git a/storage/innobase/include/row0mysql.h b/storage/innobase/include/row0mysql.h
index f018f4eed73..cbb544f60c1 100644
--- a/storage/innobase/include/row0mysql.h
+++ b/storage/innobase/include/row0mysql.h
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 2000, 2017, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, 2019, MariaDB Corporation.
+Copyright (c) 2017, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -804,12 +804,6 @@ struct row_prebuilt_t {
search key values from MySQL format
to InnoDB format.*/
uint srch_key_val_len; /*!< Size of search key */
- /** Disable prefetch. */
- bool m_no_prefetch;
-
- /** Return materialized key for secondary index scan */
- bool m_read_virtual_key;
-
/** The MySQL table object */
TABLE* m_mysql_table;
diff --git a/storage/innobase/include/srv0srv.h b/storage/innobase/include/srv0srv.h
index e1d37613dc9..8dcba6e6bc5 100644
--- a/storage/innobase/include/srv0srv.h
+++ b/storage/innobase/include/srv0srv.h
@@ -3,7 +3,7 @@
Copyright (c) 1995, 2017, Oracle and/or its affiliates. All rights reserved.
Copyright (c) 2008, 2009, Google Inc.
Copyright (c) 2009, Percona Inc.
-Copyright (c) 2013, 2019, MariaDB Corporation.
+Copyright (c) 2013, 2021, MariaDB Corporation.
Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
@@ -410,8 +410,6 @@ extern double srv_defragment_fill_factor;
extern uint srv_defragment_frequency;
extern ulonglong srv_defragment_interval;
-extern ulong srv_idle_flush_pct;
-
extern uint srv_change_buffer_max_size;
/* Number of IO operations per second the server can do */
@@ -594,9 +592,6 @@ extern struct export_var_t export_vars;
/** Global counters */
extern srv_stats_t srv_stats;
-/** Simulate compression failures. */
-extern uint srv_simulate_comp_failures;
-
/** Fatal semaphore wait threshold = maximum number of seconds
that semaphore times out in InnoDB */
#define DEFAULT_SRV_FATAL_SEMAPHORE_TIMEOUT 600
diff --git a/storage/innobase/include/trx0sys.h b/storage/innobase/include/trx0sys.h
index 6ba457cdc40..5812c87feeb 100644
--- a/storage/innobase/include/trx0sys.h
+++ b/storage/innobase/include/trx0sys.h
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, 2020, MariaDB Corporation.
+Copyright (c) 2017, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -89,7 +89,6 @@ void
trx_write_trx_id(byte* db_trx_id, trx_id_t id)
{
compile_time_assert(DATA_TRX_ID_LEN == 6);
- ut_ad(id);
mach_write_to_6(db_trx_id, id);
}
@@ -847,8 +846,10 @@ public:
#endif
/** Latest recovered binlog offset */
uint64_t recovered_binlog_offset;
- /** Latest recovred binlog file name */
+ /** Latest recovered binlog file name */
char recovered_binlog_filename[TRX_SYS_MYSQL_LOG_NAME_LEN];
+ /** FIL_PAGE_LSN of the page with the latest recovered binlog metadata */
+ lsn_t recovered_binlog_lsn;
/**
diff --git a/storage/innobase/include/trx0trx.h b/storage/innobase/include/trx0trx.h
index c32234d923d..daffbacdfe6 100644
--- a/storage/innobase/include/trx0trx.h
+++ b/storage/innobase/include/trx0trx.h
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2015, 2020, MariaDB Corporation.
+Copyright (c) 2015, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -799,6 +799,9 @@ public:
/** whether wsrep_on(mysql_thd) held at the start of transaction */
bool wsrep;
bool is_wsrep() const { return UNIV_UNLIKELY(wsrep); }
+ /** true if a BF thread is performing a unique secondary index scan */
+ bool wsrep_UK_scan;
+ bool is_wsrep_UK_scan() const { return UNIV_UNLIKELY(wsrep_UK_scan); }
#else /* WITH_WSREP */
bool is_wsrep() const { return false; }
#endif /* WITH_WSREP */
diff --git a/storage/innobase/include/univ.i b/storage/innobase/include/univ.i
index 99e493acfb4..b66ea937ec2 100644
--- a/storage/innobase/include/univ.i
+++ b/storage/innobase/include/univ.i
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2013, 2020, MariaDB Corporation.
+Copyright (c) 2013, 2021, MariaDB Corporation.
Copyright (c) 2008, Google Inc.
Portions of this file contain modifications contributed and copyrighted by
diff --git a/storage/innobase/include/ut0ut.h b/storage/innobase/include/ut0ut.h
index 430b99d7667..807d99fb872 100644
--- a/storage/innobase/include/ut0ut.h
+++ b/storage/innobase/include/ut0ut.h
@@ -159,7 +159,7 @@ ut_time_ms(void);
store the given number of bits.
@param b in: bits
@return number of bytes (octets) needed to represent b */
-#define UT_BITS_IN_BYTES(b) (((b) + 7) / 8)
+#define UT_BITS_IN_BYTES(b) (((b) + 7) >> 3)
/** Determines if a number is zero or a power of two.
@param[in] n number
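
The macro change above replaces the division by 8 with a shift; both forms compute the same ceiling division for non-negative b. A quick illustrative check:

static_assert(((0 + 7) >> 3) == 0, "0 bits need 0 bytes");
static_assert(((1 + 7) >> 3) == 1, "1 bit needs 1 byte");
static_assert(((8 + 7) >> 3) == 1, "8 bits still fit in 1 byte");
static_assert(((9 + 7) >> 3) == 2, "9 bits need 2 bytes");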
diff --git a/storage/innobase/lock/lock0lock.cc b/storage/innobase/lock/lock0lock.cc
index 81525680a33..e733a6a1d03 100644
--- a/storage/innobase/lock/lock0lock.cc
+++ b/storage/innobase/lock/lock0lock.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2017, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2014, 2020, MariaDB Corporation.
+Copyright (c) 2014, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -637,56 +637,82 @@ lock_rec_get_insert_intention(
return(lock->type_mode & LOCK_INSERT_INTENTION);
}
+#ifdef UNIV_DEBUG
#ifdef WITH_WSREP
-/** Check if both conflicting lock and other record lock are brute force
-(BF). This case is a bug so report lock information and wsrep state.
-@param[in] lock_rec1 conflicting waiting record lock or NULL
-@param[in] lock_rec2 other waiting record lock
-@param[in] trx1 lock_rec1 can be NULL, trx
+/** Check if both the transaction holding the conflicting lock and the
+transaction requesting the record lock are brute force (BF). If they
+are, check whether this BF-BF wait is correct; if not, report it and assert.
+
+@param[in] lock_rec other waiting record lock
+@param[in] trx trx requesting conflicting record lock
*/
-static void wsrep_assert_no_bf_bf_wait(
- const lock_t* lock_rec1,
- const lock_t* lock_rec2,
- const trx_t* trx1)
+static void wsrep_assert_no_bf_bf_wait(const lock_t *lock, const trx_t *trx)
{
- ut_ad(!lock_rec1 || lock_get_type_low(lock_rec1) == LOCK_REC);
- ut_ad(lock_get_type_low(lock_rec2) == LOCK_REC);
+ ut_ad(lock_get_type_low(lock) == LOCK_REC);
+ ut_ad(lock_mutex_own());
+ trx_t* lock_trx= lock->trx;
- if (!trx1->is_wsrep() || !lock_rec2->trx->is_wsrep())
- return;
- if (UNIV_LIKELY(!wsrep_thd_is_BF(trx1->mysql_thd, FALSE)))
+ /* Note that we are holding lock_sys->mutex, so we must not
+ acquire the THD::LOCK_thd_data mutex below, to avoid a mutex
+ ordering violation. */
+
+ if (!trx->is_wsrep() || !lock_trx->is_wsrep())
return;
- if (UNIV_LIKELY(!wsrep_thd_is_BF(lock_rec2->trx->mysql_thd, FALSE)))
+ if (UNIV_LIKELY(!wsrep_thd_is_BF(trx->mysql_thd, FALSE))
+ || UNIV_LIKELY(!wsrep_thd_is_BF(lock_trx->mysql_thd, FALSE)))
return;
- mtr_t mtr;
+ ut_ad(trx->state == TRX_STATE_ACTIVE);
+
+ trx_mutex_enter(lock_trx);
+ const trx_state_t trx2_state= lock_trx->state;
+ trx_mutex_exit(lock_trx);
+
+ /* If the transaction is already committed in memory or
+ prepared, we should wait. When a transaction is committed in
+ memory, the trx mutex is held but not lock_sys->mutex. Therefore,
+ we could end up here before the transaction has had time to do
+ lock_release(), which is protected by lock_sys->mutex. */
+ switch (trx2_state) {
+ case TRX_STATE_COMMITTED_IN_MEMORY:
+ case TRX_STATE_PREPARED:
+ return;
+ case TRX_STATE_ACTIVE:
+ break;
+ default:
+ ut_ad("invalid state" == 0);
+ }
- if (lock_rec1) {
- ib::error() << "Waiting lock on table: "
- << lock_rec1->index->table->name
- << " index: "
- << lock_rec1->index->name()
- << " that has conflicting lock ";
- lock_rec_print(stderr, lock_rec1, mtr);
+ /* If the BF-BF order is honored, i.e. the trx already holding
+ the record lock is ordered before this new lock request,
+ we can keep trx waiting for the lock. If the conflicting
+ transaction is already aborting or rolling back for replay,
+ we can also let the new transaction wait. */
+ if (wsrep_thd_order_before(lock_trx->mysql_thd, trx->mysql_thd)
+ || wsrep_thd_is_aborting(lock_trx->mysql_thd)) {
+ return;
}
+ mtr_t mtr;
+
ib::error() << "Conflicting lock on table: "
- << lock_rec2->index->table->name
+ << lock->index->table->name
<< " index: "
- << lock_rec2->index->name()
+ << lock->index->name()
<< " that has lock ";
- lock_rec_print(stderr, lock_rec2, mtr);
+ lock_rec_print(stderr, lock, mtr);
ib::error() << "WSREP state: ";
- wsrep_report_bf_lock_wait(trx1->mysql_thd,
- trx1->id);
- wsrep_report_bf_lock_wait(lock_rec2->trx->mysql_thd,
- lock_rec2->trx->id);
+ wsrep_report_bf_lock_wait(trx->mysql_thd,
+ trx->id);
+ wsrep_report_bf_lock_wait(lock_trx->mysql_thd,
+ lock_trx->id);
/* BF-BF wait is a bug */
ut_error;
}
#endif /* WITH_WSREP */
+#endif /* UNIV_DEBUG */
/*********************************************************************//**
Checks if a lock request for a new lock has to wait for request lock2.
@@ -714,6 +740,7 @@ lock_rec_has_to_wait(
{
ut_ad(trx && lock2);
ut_ad(lock_get_type_low(lock2) == LOCK_REC);
+ ut_ad(lock_mutex_own());
if (trx == lock2->trx
|| lock_mode_compatible(
@@ -794,9 +821,25 @@ lock_rec_has_to_wait(
}
#ifdef WITH_WSREP
- /* There should not be two conflicting locks that are
- brute force. If there is it is a bug. */
- wsrep_assert_no_bf_bf_wait(NULL, lock2, trx);
+ /* The new lock request comes from a transaction doing a unique
+ key scan, and that transaction is a wsrep high-priority (brute
+ force) transaction. If the conflicting transaction is also a
+ wsrep high-priority transaction, we should avoid the lock
+ conflict, because the ordering of these transactions is already
+ decided and the conflicting transaction will be replayed later.
+ Note that the thread holding the conflicting lock cannot be
+ committed or rolled back while we hold
+ lock_sys->mutex. */
+ if (trx->is_wsrep_UK_scan()
+ && wsrep_thd_is_BF(lock2->trx->mysql_thd, false)) {
+ return false;
+ }
+
+ /* We can well let BF wait normally here, as the other
+ BF will be replayed in case of a conflict. For debug
+ builds we do additional sanity checks to catch any
+ unsupported BF wait. */
+ ut_d(wsrep_assert_no_bf_bf_wait(lock2, trx));
#endif /* WITH_WSREP */
return true;
@@ -1065,65 +1108,31 @@ lock_rec_other_has_expl_req(
#endif /* UNIV_DEBUG */
#ifdef WITH_WSREP
-static
-void
-wsrep_kill_victim(
-/*==============*/
- const trx_t * const trx,
- const lock_t *lock)
+static void wsrep_kill_victim(const trx_t * const trx, const lock_t *lock)
{
ut_ad(lock_mutex_own());
- ut_ad(trx_mutex_own(lock->trx));
+ ut_ad(trx->is_wsrep());
+ trx_t* lock_trx = lock->trx;
+ ut_ad(trx_mutex_own(lock_trx));
+ ut_ad(lock_trx != trx);
- /* quit for native mysql */
- if (!trx->is_wsrep()) return;
-
- if (!wsrep_thd_is_BF(trx->mysql_thd, FALSE)) {
+ if (!wsrep_thd_is_BF(trx->mysql_thd, FALSE))
return;
- }
- my_bool bf_other = wsrep_thd_is_BF(lock->trx->mysql_thd, FALSE);
- mtr_t mtr;
+ if (lock_trx->state == TRX_STATE_COMMITTED_IN_MEMORY
+ || lock_trx->lock.was_chosen_as_deadlock_victim)
+ return;
- if ((!bf_other) ||
- (wsrep_thd_order_before(
- trx->mysql_thd, lock->trx->mysql_thd))) {
-
- if (lock->trx->lock.que_state == TRX_QUE_LOCK_WAIT) {
- if (UNIV_UNLIKELY(wsrep_debug)) {
- ib::info() << "WSREP: BF victim waiting\n";
- }
+ if (!wsrep_thd_is_BF(lock_trx->mysql_thd, FALSE)
+ || wsrep_thd_order_before(trx->mysql_thd, lock_trx->mysql_thd)) {
+ if (lock_trx->lock.que_state == TRX_QUE_LOCK_WAIT) {
+ if (UNIV_UNLIKELY(wsrep_debug))
+ WSREP_INFO("BF victim waiting");
/* cannot release lock, until our lock
is in the queue*/
- } else if (lock->trx != trx) {
- if (wsrep_log_conflicts) {
- ib::info() << "*** Priority TRANSACTION:";
-
- trx_print_latched(stderr, trx, 3000);
-
- if (bf_other) {
- ib::info() << "*** Priority TRANSACTION:";
- } else {
- ib::info() << "*** Victim TRANSACTION:";
- }
- trx_print_latched(stderr, lock->trx, 3000);
-
- ib::info() << "*** WAITING FOR THIS LOCK TO BE GRANTED:";
-
- if (lock_get_type(lock) == LOCK_REC) {
- lock_rec_print(stderr, lock, mtr);
- } else {
- lock_table_print(stderr, lock);
- }
-
- ib::info() << " SQL1: "
- << wsrep_thd_query(trx->mysql_thd);
- ib::info() << " SQL2: "
- << wsrep_thd_query(lock->trx->mysql_thd);
- }
-
+ } else {
wsrep_innobase_kill_one_trx(trx->mysql_thd,
- lock->trx, true);
+ lock_trx, true);
}
}
}
@@ -1454,11 +1463,6 @@ lock_rec_create_low(
trx_mutex_exit(c_lock->trx);
- if (UNIV_UNLIKELY(wsrep_debug)) {
- wsrep_report_bf_lock_wait(trx->mysql_thd, trx->id);
- wsrep_report_bf_lock_wait(c_lock->trx->mysql_thd, c_lock->trx->id);
- }
-
/* have to bail out here to avoid lock_set_lock... */
return(lock);
}
@@ -2222,10 +2226,6 @@ static void lock_rec_dequeue_from_page(lock_t* in_lock)
/* Grant the lock */
ut_ad(lock->trx != in_lock->trx);
lock_grant(lock);
-#ifdef WITH_WSREP
- } else {
- wsrep_assert_no_bf_bf_wait(c, lock, c->trx);
-#endif /* WITH_WSREP */
}
}
} else {
@@ -4178,10 +4178,6 @@ released:
/* Grant the lock */
ut_ad(trx != lock->trx);
lock_grant(lock);
-#ifdef WITH_WSREP
- } else {
- wsrep_assert_no_bf_bf_wait(c, lock, c->trx);
-#endif /* WITH_WSREP */
}
}
} else {
@@ -4237,6 +4233,18 @@ lock_check_dict_lock(
and release possible other transactions waiting because of these locks. */
void lock_release(trx_t* trx)
{
+#ifdef UNIV_DEBUG
+ std::set<table_id_t> to_evict;
+ if (innodb_evict_tables_on_commit_debug && !trx->is_recovered)
+# if 1 /* if dict_stats_exec_sql() were not playing dirty tricks */
+ if (!mutex_own(&dict_sys.mutex))
+# else /* this would be more proper way to do it */
+ if (!trx->dict_operation_lock_mode && !trx->dict_operation)
+# endif
+ for (const auto& p : trx->mod_tables)
+ if (!p.first->is_temporary())
+ to_evict.emplace(p.first->id);
+#endif
ulint count = 0;
trx_id_t max_trx_id = trx_sys.get_max_trx_id();
@@ -4285,6 +4293,25 @@ void lock_release(trx_t* trx)
}
lock_mutex_exit();
+
+#ifdef UNIV_DEBUG
+ if (to_evict.empty()) {
+ return;
+ }
+ mutex_enter(&dict_sys.mutex);
+ lock_mutex_enter();
+ for (table_id_t id : to_evict) {
+ if (dict_table_t *table = dict_table_open_on_id(
+ id, TRUE, DICT_TABLE_OP_OPEN_ONLY_IF_CACHED)) {
+ if (!table->get_ref_count()
+ && !UT_LIST_GET_LEN(table->locks)) {
+ dict_sys.remove(table, true);
+ }
+ }
+ }
+ lock_mutex_exit();
+ mutex_exit(&dict_sys.mutex);
+#endif
}
/* True if a lock mode is S or X */
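
The debug-only eviction above first records candidate table IDs and only evicts after re-latching dict_sys.mutex before the lock mutex, respecting the canonical latch order. A generic sketch of that collect-now, act-later pattern (the std::mutex names and stubs are purely illustrative):

#include <mutex>
#include <set>

static std::mutex dict_mutex; /* canonical order: dict_mutex before lock_mutex */
static std::mutex lock_mutex;

static void evict_one(unsigned long) {} /* stub */

static void release_then_evict(const std::set<unsigned long> &to_evict)
{
	/* Re-latch in the canonical order before touching the shared cache;
	evicting while holding only lock_mutex would violate that order. */
	std::lock_guard<std::mutex> d(dict_mutex);
	std::lock_guard<std::mutex> l(lock_mutex);
	for (unsigned long id : to_evict)
		evict_one(id);
}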
@@ -4454,7 +4481,8 @@ static void lock_rec_print(FILE* file, const lock_t* lock, mtr_t& mtr)
ut_ad(!page_rec_is_metadata(rec));
offsets = rec_get_offsets(
- rec, lock->index, offsets, true,
+ rec, lock->index, offsets,
+ lock->index->n_core_fields,
ULINT_UNDEFINED, &heap);
putc(' ', file);
@@ -5000,8 +5028,8 @@ loop:
ut_ad(!lock_rec_get_nth_bit(lock, i)
|| page_rec_is_leaf(rec));
offsets = rec_get_offsets(rec, lock->index, offsets,
- true, ULINT_UNDEFINED,
- &heap);
+ lock->index->n_core_fields,
+ ULINT_UNDEFINED, &heap);
/* If this thread is holding the file space
latch (fil_space_t::latch), the following
@@ -5332,7 +5360,8 @@ lock_rec_insert_check_and_lock(
const rec_offs* offsets;
rec_offs_init(offsets_);
- offsets = rec_get_offsets(next_rec, index, offsets_, true,
+ offsets = rec_get_offsets(next_rec, index, offsets_,
+ index->n_core_fields,
ULINT_UNDEFINED, &heap);
ut_ad(lock_rec_queue_validate(
@@ -5640,6 +5669,19 @@ lock_sec_rec_modify_check_and_lock(
heap_no = page_rec_get_heap_no(rec);
+#ifdef WITH_WSREP
+ trx_t *trx= thr_get_trx(thr);
+ /* If the transaction scanning a unique secondary key is a wsrep
+ high-priority thread (brute force), the scan may involve
+ GAP-locking in the index. As this locking also happens when
+ applying replication events in high-priority applier threads,
+ there is a possibility of lock conflicts between two wsrep
+ high-priority threads. To avoid such GAP-locking, we mark here
+ that this transaction is using a unique key scan. */
+ if (trx->is_wsrep() && wsrep_thd_is_BF(trx->mysql_thd, false))
+ trx->wsrep_UK_scan= true;
+#endif /* WITH_WSREP */
+
/* Another transaction cannot have an implicit lock on the record,
because when we come here, we already have modified the clustered
index record, and this would not have been possible if another active
@@ -5648,6 +5690,10 @@ lock_sec_rec_modify_check_and_lock(
err = lock_rec_lock(TRUE, LOCK_X | LOCK_REC_NOT_GAP,
block, heap_no, index, thr);
+#ifdef WITH_WSREP
+ trx->wsrep_UK_scan= false;
+#endif /* WITH_WSREP */
+
#ifdef UNIV_DEBUG
{
mem_heap_t* heap = NULL;
@@ -5655,7 +5701,8 @@ lock_sec_rec_modify_check_and_lock(
const rec_offs* offsets;
rec_offs_init(offsets_);
- offsets = rec_get_offsets(rec, index, offsets_, true,
+ offsets = rec_get_offsets(rec, index, offsets_,
+ index->n_core_fields,
ULINT_UNDEFINED, &heap);
ut_ad(lock_rec_queue_validate(
@@ -5739,9 +5786,26 @@ lock_sec_rec_read_check_and_lock(
return DB_SUCCESS;
}
+#ifdef WITH_WSREP
+ trx_t *trx= thr_get_trx(thr);
+ /* If the transaction scanning a unique secondary key is a wsrep
+ high-priority thread (brute force), the scan may involve
+ GAP-locking in the index. As this locking also happens when
+ applying replication events in high-priority applier threads,
+ there is a possibility of lock conflicts between two wsrep
+ high-priority threads. To avoid such GAP-locking, we mark here
+ that this transaction is using a unique key scan. */
+ if (trx->is_wsrep() && wsrep_thd_is_BF(trx->mysql_thd, false))
+ trx->wsrep_UK_scan= true;
+#endif /* WITH_WSREP */
+
err = lock_rec_lock(FALSE, ulint(mode) | gap_mode,
block, heap_no, index, thr);
+#ifdef WITH_WSREP
+ trx->wsrep_UK_scan= false;
+#endif /* WITH_WSREP */
+
ut_ad(lock_rec_queue_validate(FALSE, block, rec, index, offsets));
return(err);
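
Both hunks above set trx->wsrep_UK_scan before lock_rec_lock() and clear it right after. As a design note only (not part of the patch), the same set-then-always-clear pattern could be expressed with a small scope guard:

struct uk_scan_guard
{
	bool &flag;
	uk_scan_guard(bool &f, bool enable) : flag(f) { flag= enable; }
	~uk_scan_guard() { flag= false; }
};

/* usage sketch:
	uk_scan_guard g(trx->wsrep_UK_scan,
			trx->is_wsrep()
			&& wsrep_thd_is_BF(trx->mysql_thd, false));
	err = lock_rec_lock(...);
*/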
@@ -5850,7 +5914,7 @@ lock_clust_rec_read_check_and_lock_alt(
rec_offs_init(offsets_);
ut_ad(page_rec_is_leaf(rec));
- offsets = rec_get_offsets(rec, index, offsets, true,
+ offsets = rec_get_offsets(rec, index, offsets, index->n_core_fields,
ULINT_UNDEFINED, &tmp_heap);
err = lock_clust_rec_read_check_and_lock(flags, block, rec, index,
offsets, mode, gap_mode, thr);
@@ -6141,6 +6205,7 @@ lock_cancel_waiting_and_release(
ut_ad(lock_mutex_own());
ut_ad(trx_mutex_own(lock->trx));
+ ut_ad(lock->trx->state == TRX_STATE_ACTIVE);
lock->trx->lock.cancel = true;
diff --git a/storage/innobase/log/log0recv.cc b/storage/innobase/log/log0recv.cc
index d7fef4e9675..4359fb6b308 100644
--- a/storage/innobase/log/log0recv.cc
+++ b/storage/innobase/log/log0recv.cc
@@ -972,7 +972,8 @@ fail:
DBUG_EXECUTE_IF("log_checksum_mismatch", { cksum = crc + 1; });
if (crc != cksum) {
- ib::error() << "Invalid log block checksum."
+ ib::error_or_warn(srv_operation != SRV_OPERATION_BACKUP)
+ << "Invalid log block checksum."
<< " block: " << block_number
<< " checkpoint no: "
<< log_block_get_checkpoint_no(buf)
@@ -2303,8 +2304,6 @@ void recv_apply_hashed_log_recs(bool last_batch)
recv_no_ibuf_operations
= !last_batch || is_mariabackup_restore_or_export();
- ut_d(recv_no_log_write = recv_no_ibuf_operations);
-
if (ulint n = recv_sys.n_addrs) {
if (!log_sys.log.subformat && !srv_force_recovery
&& srv_undo_tablespaces_open) {
@@ -2391,7 +2390,7 @@ apply:
/* Wait until all the pages have been processed */
- while (recv_sys.n_addrs != 0) {
+ while (recv_sys.n_addrs || buf_get_n_pending_read_ios()) {
const bool abort = recv_sys.found_corrupt_log
|| recv_sys.found_corrupt_fs;
@@ -3871,6 +3870,8 @@ recv_recovery_from_checkpoint_start(lsn_t flush_lsn)
mutex_enter(&recv_sys.mutex);
recv_sys.apply_log_recs = true;
+ recv_no_ibuf_operations = is_mariabackup_restore_or_export();
+ ut_d(recv_no_log_write = recv_no_ibuf_operations);
mutex_exit(&recv_sys.mutex);
diff --git a/storage/innobase/os/os0file.cc b/storage/innobase/os/os0file.cc
index f96ff6b5171..62908d37337 100644
--- a/storage/innobase/os/os0file.cc
+++ b/storage/innobase/os/os0file.cc
@@ -2,7 +2,7 @@
Copyright (c) 1995, 2019, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2009, Percona Inc.
-Copyright (c) 2013, 2020, MariaDB Corporation.
+Copyright (c) 2013, 2021, MariaDB Corporation.
Portions of this file contain modifications contributed and copyrighted
by Percona Inc.. Those modifications are
@@ -2890,7 +2890,6 @@ os_file_create_func(
ut_a(type == OS_LOG_FILE
|| type == OS_DATA_FILE
- || type == OS_DATA_TEMP_FILE
|| type == OS_DATA_FILE_NO_O_DIRECT);
ut_a(purpose == OS_FILE_AIO || purpose == OS_FILE_NORMAL);
@@ -2938,7 +2937,7 @@ os_file_create_func(
/* We disable OS caching (O_DIRECT) only on data files */
if (!read_only
&& *success
- && (type != OS_LOG_FILE && type != OS_DATA_TEMP_FILE
+ && (type != OS_LOG_FILE
&& type != OS_DATA_FILE_NO_O_DIRECT)
&& (srv_file_flush_method == SRV_O_DIRECT
|| srv_file_flush_method == SRV_O_DIRECT_NO_FSYNC)) {
@@ -4137,7 +4136,9 @@ os_file_create_func(
case SRV_ALL_O_DIRECT_FSYNC:
/*Traditional Windows behavior, no buffering for any files.*/
- attributes |= FILE_FLAG_NO_BUFFERING;
+ if (type != OS_DATA_FILE_NO_O_DIRECT) {
+ attributes |= FILE_FLAG_NO_BUFFERING;
+ }
break;
case SRV_FSYNC:
@@ -7707,7 +7708,7 @@ static bool is_file_on_ssd(char *file_path)
/** Determine some file metadata when creating or reading the file.
@param file the file that is being created, or OS_FILE_CLOSED */
void fil_node_t::find_metadata(os_file_t file
-#ifdef UNIV_LINUX
+#ifndef _WIN32
, struct stat* statbuf
#endif
)
@@ -7747,18 +7748,18 @@ void fil_node_t::find_metadata(os_file_t file
block_size = 512;
}
#else
- on_ssd = space->atomic_write_supported;
-# ifdef UNIV_LINUX
- if (!on_ssd) {
- struct stat sbuf;
- if (!statbuf && !fstat(file, &sbuf)) {
- statbuf = &sbuf;
- }
- if (statbuf && fil_system.is_ssd(statbuf->st_dev)) {
- on_ssd = true;
- }
+ struct stat sbuf;
+ if (!statbuf && !fstat(file, &sbuf)) {
+ statbuf = &sbuf;
}
+ if (statbuf) {
+ block_size = statbuf->st_blksize;
+ }
+ on_ssd = space->atomic_write_supported
+# ifdef UNIV_LINUX
+ || (statbuf && fil_system.is_ssd(statbuf->st_dev))
# endif
+ ;
#endif
if (!space->atomic_write_supported) {
space->atomic_write_supported = atomic_write
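
The find_metadata() hunk above now derives block_size from fstat()'s st_blksize on all non-Windows platforms. A minimal POSIX sketch of that probe (the helper name is illustrative; the 512-byte fallback mirrors the default used in the Windows branch above):

#include <sys/stat.h>

static unsigned long io_block_size(int fd)
{
	struct stat sb;
	if (fstat(fd, &sb) == 0)
		return static_cast<unsigned long>(sb.st_blksize);
	return 512; /* conservative fallback if fstat() fails */
}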
@@ -7794,7 +7795,6 @@ bool fil_node_t::read_page0(bool first)
if (fstat(handle, &statbuf)) {
return false;
}
- block_size = statbuf.st_blksize;
os_offset_t size_bytes = statbuf.st_size;
#else
os_offset_t size_bytes = os_file_get_size(handle);
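The find_metadata()/read_page0() hunks above move the block-size probe out of read_page0(): on every non-Windows platform the node now calls fstat() once and takes st_blksize, and on Linux the SSD check reuses the same struct stat. A self-contained POSIX sketch of that probe; the file path is a placeholder, not an InnoDB data file:

#include <fcntl.h>
#include <sys/stat.h>
#include <unistd.h>
#include <cstdio>

int main(int argc, char** argv) {
  const char* path = argc > 1 ? argv[1] : "/etc/hosts";  // any readable file
  int fd = open(path, O_RDONLY);
  if (fd < 0) { perror("open"); return 1; }
  struct stat sbuf;
  if (fstat(fd, &sbuf) == 0) {
    // st_blksize is the filesystem's preferred I/O size; the patch stores
    // it up front as fil_node_t::block_size, and st_dev feeds the SSD check.
    std::printf("st_blksize=%ld st_dev=%ld\n",
                (long) sbuf.st_blksize, (long) sbuf.st_dev);
  }
  close(fd);
  return 0;
}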
diff --git a/storage/innobase/page/page0cur.cc b/storage/innobase/page/page0cur.cc
index 0586d6d8a33..9bf9fe66b33 100644
--- a/storage/innobase/page/page0cur.cc
+++ b/storage/innobase/page/page0cur.cc
@@ -2,7 +2,7 @@
Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2012, Facebook Inc.
-Copyright (c) 2018, 2020, MariaDB Corporation.
+Copyright (c) 2018, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -75,7 +75,7 @@ page_cur_try_search_shortcut(
ut_ad(page_is_leaf(page));
rec = page_header_get_ptr(page, PAGE_LAST_INSERT);
- offsets = rec_get_offsets(rec, index, offsets, true,
+ offsets = rec_get_offsets(rec, index, offsets, index->n_core_fields,
dtuple_get_n_fields(tuple), &heap);
ut_ad(rec);
@@ -90,7 +90,8 @@ page_cur_try_search_shortcut(
next_rec = page_rec_get_next_const(rec);
if (!page_rec_is_supremum(next_rec)) {
- offsets = rec_get_offsets(next_rec, index, offsets, true,
+ offsets = rec_get_offsets(next_rec, index, offsets,
+ index->n_core_fields,
dtuple_get_n_fields(tuple), &heap);
if (cmp_dtuple_rec_with_match(tuple, next_rec, offsets,
@@ -159,7 +160,7 @@ page_cur_try_search_shortcut_bytes(
ut_ad(page_is_leaf(page));
rec = page_header_get_ptr(page, PAGE_LAST_INSERT);
- offsets = rec_get_offsets(rec, index, offsets, true,
+ offsets = rec_get_offsets(rec, index, offsets, index->n_core_fields,
dtuple_get_n_fields(tuple), &heap);
ut_ad(rec);
@@ -180,7 +181,8 @@ page_cur_try_search_shortcut_bytes(
next_rec = page_rec_get_next_const(rec);
if (!page_rec_is_supremum(next_rec)) {
- offsets = rec_get_offsets(next_rec, index, offsets, true,
+ offsets = rec_get_offsets(next_rec, index, offsets,
+ index->n_core_fields,
dtuple_get_n_fields(tuple), &heap);
if (cmp_dtuple_rec_with_match_bytes(
@@ -321,14 +323,14 @@ page_cur_search_with_match(
#endif /* UNIV_ZIP_DEBUG */
ut_d(page_check_dir(page));
- const bool is_leaf = page_is_leaf(page);
+ const ulint n_core = page_is_leaf(page) ? index->n_core_fields : 0;
#ifdef BTR_CUR_HASH_ADAPT
- if (is_leaf
+ if (n_core
&& page_get_direction(page) == PAGE_RIGHT
&& page_header_get_offs(page, PAGE_LAST_INSERT)
&& mode == PAGE_CUR_LE
- && !dict_index_is_spatial(index)
+ && !index->is_spatial()
&& page_header_get_field(page, PAGE_N_DIRECTION) > 3
&& page_cur_try_search_shortcut(
block, index, tuple,
@@ -344,10 +346,10 @@ page_cur_search_with_match(
/* If the mode is for R-tree indexes, use the special MBR
related compare functions */
- if (dict_index_is_spatial(index) && mode > PAGE_CUR_LE) {
+ if (index->is_spatial() && mode > PAGE_CUR_LE) {
/* For leaf level insert, we still use the traditional
compare function for now */
- if (mode == PAGE_CUR_RTREE_INSERT && is_leaf) {
+ if (mode == PAGE_CUR_RTREE_INSERT && n_core) {
mode = PAGE_CUR_LE;
} else {
rtr_cur_search_with_match(
@@ -392,7 +394,7 @@ page_cur_search_with_match(
offsets = offsets_;
offsets = rec_get_offsets(
- mid_rec, index, offsets, is_leaf,
+ mid_rec, index, offsets, n_core,
dtuple_get_n_fields_cmp(tuple), &heap);
cmp = cmp_dtuple_rec_with_match(
@@ -446,7 +448,7 @@ up_slot_match:
offsets = offsets_;
offsets = rec_get_offsets(
- mid_rec, index, offsets, is_leaf,
+ mid_rec, index, offsets, n_core,
dtuple_get_n_fields_cmp(tuple), &heap);
cmp = cmp_dtuple_rec_with_match(
@@ -627,7 +629,7 @@ page_cur_search_with_match_bytes(
/* Perform binary search until the lower and upper limit directory
slots come to the distance 1 of each other */
- const bool is_leaf = page_is_leaf(page);
+ const ulint n_core = page_is_leaf(page) ? index->n_core_fields : 0;
while (up - low > 1) {
mid = (low + up) / 2;
@@ -639,7 +641,7 @@ page_cur_search_with_match_bytes(
up_matched_fields, up_matched_bytes);
offsets = rec_get_offsets(
- mid_rec, index, offsets_, is_leaf,
+ mid_rec, index, offsets_, n_core,
dtuple_get_n_fields_cmp(tuple), &heap);
cmp = cmp_dtuple_rec_with_match_bytes(
@@ -707,7 +709,7 @@ up_slot_match:
}
offsets = rec_get_offsets(
- mid_rec, index, offsets_, is_leaf,
+ mid_rec, index, offsets_, n_core,
dtuple_get_n_fields_cmp(tuple), &heap);
cmp = cmp_dtuple_rec_with_match_bytes(
@@ -817,7 +819,8 @@ page_cur_insert_rec_write_log(
ut_ad(!page_rec_is_comp(insert_rec)
== !dict_table_is_comp(index->table));
- const bool is_leaf = page_rec_is_leaf(cursor_rec);
+ const ulint n_core = page_rec_is_leaf(cursor_rec)
+ ? index->n_core_fields : 0;
{
mem_heap_t* heap = NULL;
@@ -831,9 +834,9 @@ page_cur_insert_rec_write_log(
rec_offs_init(ins_offs_);
cur_offs = rec_get_offsets(cursor_rec, index, cur_offs_,
- is_leaf, ULINT_UNDEFINED, &heap);
+ n_core, ULINT_UNDEFINED, &heap);
ins_offs = rec_get_offsets(insert_rec, index, ins_offs_,
- is_leaf, ULINT_UNDEFINED, &heap);
+ n_core, ULINT_UNDEFINED, &heap);
extra_size = rec_offs_extra_size(ins_offs);
cur_extra_size = rec_offs_extra_size(cur_offs);
@@ -1091,9 +1094,9 @@ page_cur_parse_insert_rec(
/* Read from the log the inserted index record end segment which
differs from the cursor record */
- const bool is_leaf = page_is_leaf(page);
+ const ulint n_core = page_is_leaf(page) ? index->n_core_fields : 0;
- offsets = rec_get_offsets(cursor_rec, index, offsets, is_leaf,
+ offsets = rec_get_offsets(cursor_rec, index, offsets, n_core,
ULINT_UNDEFINED, &heap);
if (!(end_seg_len & 0x1UL)) {
@@ -1142,7 +1145,7 @@ page_cur_parse_insert_rec(
page_cur_position(cursor_rec, block, &cursor);
offsets = rec_get_offsets(buf + origin_offset, index, offsets,
- is_leaf, ULINT_UNDEFINED, &heap);
+ n_core, ULINT_UNDEFINED, &heap);
if (UNIV_UNLIKELY(!page_cur_rec_insert(&cursor,
buf + origin_offset,
index, offsets, mtr))) {
@@ -1323,7 +1326,8 @@ page_cur_insert_rec_low(
rec_offs_init(foffsets_);
foffsets = rec_get_offsets(
- free_rec, index, foffsets, page_is_leaf(page),
+ free_rec, index, foffsets,
+ page_is_leaf(page) ? index->n_core_fields : 0,
ULINT_UNDEFINED, &heap);
if (rec_offs_size(foffsets) < rec_size) {
if (UNIV_LIKELY_NULL(heap)) {
@@ -1736,7 +1740,8 @@ page_cur_insert_rec_zip(
rec_offs_init(foffsets_);
foffsets = rec_get_offsets(free_rec, index, foffsets,
- page_rec_is_leaf(free_rec),
+ page_rec_is_leaf(free_rec)
+ ? index->n_core_fields : 0,
ULINT_UNDEFINED, &heap);
if (rec_offs_size(foffsets) < rec_size) {
too_small:
@@ -2097,10 +2102,11 @@ page_copy_rec_list_end_to_created_page(
slot_index = 0;
n_recs = 0;
- const bool is_leaf = page_is_leaf(new_page);
+ const ulint n_core = page_is_leaf(new_page)
+ ? index->n_core_fields : 0;
do {
- offsets = rec_get_offsets(rec, index, offsets, is_leaf,
+ offsets = rec_get_offsets(rec, index, offsets, n_core,
ULINT_UNDEFINED, &heap);
insert_rec = rec_copy(heap_top, rec, offsets);
@@ -2142,7 +2148,7 @@ page_copy_rec_list_end_to_created_page(
heap_top += rec_size;
- rec_offs_make_valid(insert_rec, index, is_leaf, offsets);
+ rec_offs_make_valid(insert_rec, index, n_core != 0, offsets);
page_cur_insert_rec_write_log(insert_rec, rec_size, prev_rec,
index, mtr);
prev_rec = insert_rec;
@@ -2279,7 +2285,8 @@ page_cur_parse_delete_rec(
page_cur_delete_rec(&cursor, index,
rec_get_offsets(rec, index, offsets_,
- page_rec_is_leaf(rec),
+ page_rec_is_leaf(rec)
+ ? index->n_core_fields : 0,
ULINT_UNDEFINED, &heap),
mtr);
if (UNIV_LIKELY_NULL(heap)) {
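A recurring change in page0cur.cc (and in the page0page.cc and page0zip.cc hunks that follow) is replacing the bool is_leaf argument of rec_get_offsets() with a ulint n_core: 0 still means "non-leaf", while a non-zero value also tells the decoder how many core fields the index had before any instant ADD COLUMN. A tiny sketch of why the old boolean tests keep working, with hypothetical stand-in names rather than the InnoDB functions:

#include <cassert>

// Hypothetical stand-ins; not InnoDB types.
struct index_t { unsigned n_core_fields; unsigned n_uniq_in_tree; };

// Old shape:  decode(rec, index, bool leaf)
// New shape:  decode(rec, index, unsigned n_core) -- 0 means non-leaf,
// so code that used to test `if (leaf)` can simply test `if (n_core)`.
unsigned fields_to_decode(const index_t& ix, unsigned n_core) {
  return n_core ? n_core                  // leaf page: core fields
                : ix.n_uniq_in_tree + 1;  // node pointer: keys + child page no.
}

int main() {
  const index_t ix{5, 2};
  const bool leaf = true;
  assert(fields_to_decode(ix, leaf ? ix.n_core_fields : 0) == 5);
  assert(fields_to_decode(ix, /* non-leaf */ 0) == 3);
  return 0;
}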
diff --git a/storage/innobase/page/page0page.cc b/storage/innobase/page/page0page.cc
index ae2cf1870e1..fc33b38beda 100644
--- a/storage/innobase/page/page0page.cc
+++ b/storage/innobase/page/page0page.cc
@@ -2,7 +2,7 @@
Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2012, Facebook Inc.
-Copyright (c) 2017, 2020, MariaDB Corporation.
+Copyright (c) 2017, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -540,7 +540,8 @@ page_copy_rec_list_end_no_locks(
ut_a(page_is_comp(new_page) == page_rec_is_comp(rec));
ut_a(mach_read_from_2(new_page + srv_page_size - 10) == (ulint)
(page_is_comp(new_page) ? PAGE_NEW_INFIMUM : PAGE_OLD_INFIMUM));
- const bool is_leaf = page_is_leaf(block->frame);
+ const ulint n_core = page_is_leaf(block->frame)
+ ? index->n_core_fields : 0;
cur2 = page_get_infimum_rec(buf_block_get_frame(new_block));
@@ -548,7 +549,7 @@ page_copy_rec_list_end_no_locks(
while (!page_cur_is_after_last(&cur1)) {
rec_t* ins_rec;
- offsets = rec_get_offsets(cur1.rec, index, offsets, is_leaf,
+ offsets = rec_get_offsets(cur1.rec, index, offsets, n_core,
ULINT_UNDEFINED, &heap);
ins_rec = page_cur_insert_rec_low(cur2, index,
cur1.rec, offsets, mtr);
@@ -777,7 +778,7 @@ page_copy_rec_list_start(
cur2 = ret;
- const bool is_leaf = page_rec_is_leaf(rec);
+ const ulint n_core = page_rec_is_leaf(rec) ? index->n_core_fields : 0;
/* Copy records from the original page to the new page */
if (index->is_spatial()) {
@@ -799,7 +800,7 @@ page_copy_rec_list_start(
} else {
while (page_cur_get_rec(&cur1) != rec) {
offsets = rec_get_offsets(cur1.rec, index, offsets,
- is_leaf,
+ n_core,
ULINT_UNDEFINED, &heap);
cur2 = page_cur_insert_rec_low(cur2, index,
cur1.rec, offsets, mtr);
@@ -819,7 +820,7 @@ page_copy_rec_list_start(
same temp-table in parallel.
max_trx_id is ignored for temp tables because it not required
for MVCC. */
- if (is_leaf && dict_index_is_sec_or_ibuf(index)
+ if (n_core && dict_index_is_sec_or_ibuf(index)
&& !index->table->is_temporary()) {
page_update_max_trx_id(new_block, NULL,
page_get_max_trx_id(page_align(rec)),
@@ -1050,7 +1051,7 @@ delete_all:
? MLOG_COMP_LIST_END_DELETE
: MLOG_LIST_END_DELETE, mtr);
- const bool is_leaf = page_is_leaf(page);
+ const ulint n_core = page_is_leaf(page) ? index->n_core_fields : 0;
if (page_zip) {
mtr_log_t log_mode;
@@ -1064,7 +1065,7 @@ delete_all:
page_cur_t cur;
page_cur_position(rec, block, &cur);
- offsets = rec_get_offsets(rec, index, offsets, is_leaf,
+ offsets = rec_get_offsets(rec, index, offsets, n_core,
ULINT_UNDEFINED, &heap);
rec = rec_get_next_ptr(rec, TRUE);
#ifdef UNIV_ZIP_DEBUG
@@ -1097,8 +1098,7 @@ delete_all:
do {
ulint s;
- offsets = rec_get_offsets(rec2, index, offsets,
- is_leaf,
+ offsets = rec_get_offsets(rec2, index, offsets, n_core,
ULINT_UNDEFINED, &heap);
s = rec_offs_size(offsets);
ut_ad(ulint(rec2 - page) + s
@@ -1244,11 +1244,12 @@ page_delete_rec_list_start(
/* Individual deletes are not logged */
mtr_log_t log_mode = mtr_set_log_mode(mtr, MTR_LOG_NONE);
- const bool is_leaf = page_rec_is_leaf(rec);
+ const ulint n_core = page_rec_is_leaf(rec)
+ ? index->n_core_fields : 0;
while (page_cur_get_rec(&cur1) != rec) {
offsets = rec_get_offsets(page_cur_get_rec(&cur1), index,
- offsets, is_leaf,
+ offsets, n_core,
ULINT_UNDEFINED, &heap);
page_cur_delete_rec(&cur1, index, offsets, mtr);
}
@@ -2461,9 +2462,10 @@ wrong_page_type:
rec = page_get_infimum_rec(page);
+ const ulint n_core = page_is_leaf(page) ? index->n_core_fields : 0;
+
for (;;) {
- offsets = rec_get_offsets(rec, index, offsets,
- page_is_leaf(page),
+ offsets = rec_get_offsets(rec, index, offsets, n_core,
ULINT_UNDEFINED, &heap);
if (page_is_comp(page) && page_rec_is_user_rec(rec)
@@ -2709,8 +2711,7 @@ n_owned_zero:
rec = page_header_get_ptr(page, PAGE_FREE);
while (rec != NULL) {
- offsets = rec_get_offsets(rec, index, offsets,
- page_is_leaf(page),
+ offsets = rec_get_offsets(rec, index, offsets, n_core,
ULINT_UNDEFINED, &heap);
if (UNIV_UNLIKELY(!page_rec_validate(rec, offsets))) {
ret = FALSE;
diff --git a/storage/innobase/page/page0zip.cc b/storage/innobase/page/page0zip.cc
index eb94aad207c..111a400ec92 100644
--- a/storage/innobase/page/page0zip.cc
+++ b/storage/innobase/page/page0zip.cc
@@ -2,7 +2,7 @@
Copyright (c) 2005, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2012, Facebook Inc.
-Copyright (c) 2014, 2020, MariaDB Corporation.
+Copyright (c) 2014, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -877,7 +877,7 @@ page_zip_compress_node_ptrs(
do {
const rec_t* rec = *recs++;
- offsets = rec_get_offsets(rec, index, offsets, false,
+ offsets = rec_get_offsets(rec, index, offsets, 0,
ULINT_UNDEFINED, &heap);
/* Only leaf nodes may contain externally stored columns. */
ut_ad(!rec_offs_any_extern(offsets));
@@ -1126,7 +1126,7 @@ page_zip_compress_clust(
do {
const rec_t* rec = *recs++;
- offsets = rec_get_offsets(rec, index, offsets, true,
+ offsets = rec_get_offsets(rec, index, offsets, index->n_fields,
ULINT_UNDEFINED, &heap);
ut_ad(rec_offs_n_fields(offsets)
== dict_index_get_n_fields(index));
@@ -1349,33 +1349,6 @@ page_zip_compress(
MONITOR_INC(MONITOR_PAGE_COMPRESS);
- /* Simulate a compression failure with a probability determined by
- innodb_simulate_comp_failures, only if the page has 2 or more
- records. */
-
- if (srv_simulate_comp_failures
- && !dict_index_is_ibuf(index)
- && page_get_n_recs(page) >= 2
- && ((ulint)(rand() % 100) < srv_simulate_comp_failures)
- && strcmp(index->table->name.m_name, "IBUF_DUMMY")) {
-
-#ifdef UNIV_DEBUG
- ib::error()
- << "Simulating a compression failure"
- << " for table " << index->table->name
- << " index "
- << index->name()
- << " page "
- << page_get_page_no(page)
- << "("
- << (page_is_leaf(page) ? "leaf" : "non-leaf")
- << ")";
-
-#endif
-
- goto err_exit;
- }
-
heap = mem_heap_create(page_zip_get_size(page_zip)
+ n_fields * (2 + sizeof(ulint))
+ REC_OFFS_HEADER_SIZE
@@ -2032,7 +2005,7 @@ page_zip_apply_log(
sorted by address (indexed by
heap_no - PAGE_HEAP_NO_USER_LOW) */
ulint n_dense,/*!< in: size of recs[] */
- bool is_leaf,/*!< in: whether this is a leaf page */
+ ulint n_core, /*!< in: index->n_fields, or 0 for non-leaf */
ulint trx_id_col,/*!< in: column number of trx_id in the index,
or ULINT_UNDEFINED if none */
ulint heap_status,
@@ -2108,7 +2081,7 @@ page_zip_apply_log(
/* Clear the data bytes of the record. */
mem_heap_t* heap = NULL;
rec_offs* offs;
- offs = rec_get_offsets(rec, index, offsets, is_leaf,
+ offs = rec_get_offsets(rec, index, offsets, n_core,
ULINT_UNDEFINED, &heap);
memset(rec, 0, rec_offs_data_size(offs));
@@ -2126,7 +2099,7 @@ page_zip_apply_log(
This will be overwritten in page_zip_set_extra_bytes(),
called by page_zip_decompress_low(). */
ut_d(rec[-REC_NEW_INFO_BITS] = 0);
- rec_offs_make_valid(rec, index, is_leaf, offsets);
+ rec_offs_make_valid(rec, index, n_core != 0, offsets);
/* Copy the extra bytes (backwards). */
{
@@ -2306,7 +2279,7 @@ page_zip_decompress_node_ptrs(
}
/* Read the offsets. The status bits are needed here. */
- offsets = rec_get_offsets(rec, index, offsets, false,
+ offsets = rec_get_offsets(rec, index, offsets, 0,
ULINT_UNDEFINED, &heap);
/* Non-leaf nodes should not have any externally
@@ -2393,7 +2366,7 @@ zlib_done:
const byte* mod_log_ptr;
mod_log_ptr = page_zip_apply_log(d_stream->next_in,
d_stream->avail_in + 1,
- recs, n_dense, false,
+ recs, n_dense, 0,
ULINT_UNDEFINED, heap_status,
index, offsets);
@@ -2424,7 +2397,7 @@ zlib_done:
for (slot = 0; slot < n_dense; slot++) {
rec_t* rec = recs[slot];
- offsets = rec_get_offsets(rec, index, offsets, false,
+ offsets = rec_get_offsets(rec, index, offsets, 0,
ULINT_UNDEFINED, &heap);
/* Non-leaf nodes should not have any externally
stored columns. */
@@ -2546,7 +2519,8 @@ zlib_done:
const byte* mod_log_ptr;
mod_log_ptr = page_zip_apply_log(d_stream->next_in,
d_stream->avail_in + 1,
- recs, n_dense, true,
+ recs, n_dense,
+ index->n_fields,
ULINT_UNDEFINED, heap_status,
index, offsets);
@@ -2749,7 +2723,7 @@ page_zip_decompress_clust(
}
/* Read the offsets. The status bits are needed here. */
- offsets = rec_get_offsets(rec, index, offsets, true,
+ offsets = rec_get_offsets(rec, index, offsets, index->n_fields,
ULINT_UNDEFINED, &heap);
/* This is a leaf page in a clustered index. */
@@ -2876,7 +2850,8 @@ zlib_done:
const byte* mod_log_ptr;
mod_log_ptr = page_zip_apply_log(d_stream->next_in,
d_stream->avail_in + 1,
- recs, n_dense, true,
+ recs, n_dense,
+ index->n_fields,
trx_id_col, heap_status,
index, offsets);
@@ -2912,7 +2887,7 @@ zlib_done:
rec_t* rec = recs[slot];
bool exists = !page_zip_dir_find_free(
page_zip, page_offset(rec));
- offsets = rec_get_offsets(rec, index, offsets, true,
+ offsets = rec_get_offsets(rec, index, offsets, index->n_fields,
ULINT_UNDEFINED, &heap);
dst = rec_get_nth_field(rec, offsets,
@@ -3436,7 +3411,7 @@ page_zip_validate_low(
page + PAGE_NEW_INFIMUM, TRUE);
trec = page_rec_get_next_low(
temp_page + PAGE_NEW_INFIMUM, TRUE);
- const bool is_leaf = page_is_leaf(page);
+ const ulint n_core = page_is_leaf(page) ? index->n_fields : 0;
do {
if (page_offset(rec) != page_offset(trec)) {
@@ -3451,7 +3426,7 @@ page_zip_validate_low(
if (index) {
/* Compare the data. */
offsets = rec_get_offsets(
- rec, index, offsets, is_leaf,
+ rec, index, offsets, n_core,
ULINT_UNDEFINED, &heap);
if (memcmp(rec - rec_offs_extra_size(offsets),
diff --git a/storage/innobase/pars/pars0pars.cc b/storage/innobase/pars/pars0pars.cc
index ebfe7ada3b1..a27411c3d8b 100644
--- a/storage/innobase/pars/pars0pars.cc
+++ b/storage/innobase/pars/pars0pars.cc
@@ -1219,6 +1219,7 @@ pars_update_statement(
sel_node->row_lock_mode = LOCK_X;
} else {
node->has_clust_rec_x_lock = sel_node->set_x_locks;
+ ut_ad(node->has_clust_rec_x_lock);
}
ut_a(sel_node->n_tables == 1);
diff --git a/storage/innobase/que/que0que.cc b/storage/innobase/que/que0que.cc
index 3f4810dcc0e..e98d50ea0fc 100644
--- a/storage/innobase/que/que0que.cc
+++ b/storage/innobase/que/que0que.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, 2018, 2020 MariaDB Corporation.
+Copyright (c) 2017, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -349,7 +349,6 @@ que_fork_start_command(
case QUE_THR_RUNNING:
case QUE_THR_LOCK_WAIT:
- case QUE_THR_PROCEDURE_WAIT:
ut_error;
}
}
diff --git a/storage/innobase/rem/rem0rec.cc b/storage/innobase/rem/rem0rec.cc
index b3c2fc84231..581637be073 100644
--- a/storage/innobase/rem/rem0rec.cc
+++ b/storage/innobase/rem/rem0rec.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, 2020, MariaDB Corporation.
+Copyright (c) 2017, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -29,6 +29,7 @@ Created 5/30/1994 Heikki Tuuri
#include "mtr0log.h"
#include "fts0fts.h"
#include "trx0sys.h"
+#include "row0log.h"
/* PHYSICAL RECORD (OLD STYLE)
===========================
@@ -272,9 +273,9 @@ rec_init_offsets_comp_ordinary(
ulint n_fields = n_core;
ulint null_mask = 1;
- ut_ad(index->n_core_fields >= n_core);
ut_ad(n_core > 0);
- ut_ad(index->n_fields >= n_core);
+ ut_ad(index->n_core_fields >= n_core);
+ ut_ad(index->n_fields >= index->n_core_fields);
ut_ad(index->n_core_null_bytes <= UT_BITS_IN_BYTES(index->n_nullable));
ut_ad(format == REC_LEAF_TEMP || format == REC_LEAF_TEMP_INSTANT
|| dict_table_is_comp(index->table));
@@ -282,6 +283,11 @@ rec_init_offsets_comp_ordinary(
|| index->n_fields == rec_offs_n_fields(offsets));
ut_d(ulint n_null= 0);
+ const unsigned n_core_null_bytes = UNIV_UNLIKELY(index->n_core_fields
+ != n_core)
+ ? UT_BITS_IN_BYTES(unsigned(index->get_n_nullable(n_core)))
+ : index->n_core_null_bytes;
+
if (mblob) {
ut_ad(index->is_dummy || index->table->instant);
ut_ad(index->is_dummy || index->is_instant());
@@ -296,7 +302,7 @@ rec_init_offsets_comp_ordinary(
const ulint n_null_bytes = UT_BITS_IN_BYTES(n_nullable);
ut_d(n_null = n_nullable);
ut_ad(n_null <= index->n_nullable);
- ut_ad(n_null_bytes >= index->n_core_null_bytes
+ ut_ad(n_null_bytes >= n_core_null_bytes
|| n_core < index->n_core_fields);
lens = --nulls - n_null_bytes;
goto start;
@@ -313,9 +319,9 @@ rec_init_offsets_comp_ordinary(
case REC_LEAF_ORDINARY:
nulls -= REC_N_NEW_EXTRA_BYTES;
ordinary:
- lens = --nulls - index->n_core_null_bytes;
+ lens = --nulls - n_core_null_bytes;
- ut_d(n_null = std::min<uint>(index->n_core_null_bytes * 8U,
+ ut_d(n_null = std::min<uint>(n_core_null_bytes * 8U,
index->n_nullable));
break;
case REC_LEAF_INSTANT:
@@ -329,7 +335,7 @@ ordinary:
const ulint n_null_bytes = UT_BITS_IN_BYTES(n_nullable);
ut_d(n_null = n_nullable);
ut_ad(n_null <= index->n_nullable);
- ut_ad(n_null_bytes >= index->n_core_null_bytes
+ ut_ad(n_null_bytes >= n_core_null_bytes
|| n_core < index->n_core_fields);
lens = --nulls - n_null_bytes;
}
@@ -583,14 +589,14 @@ is (SQL_NULL), the field i is NULL. When the type of the offset at [i+1]
is (STORED_OFFPAGE), the field i is stored externally.
@param[in] rec record
@param[in] index the index that the record belongs in
-@param[in] leaf whether the record resides in a leaf page
+@param[in] n_core 0, or index->n_core_fields for leaf page
@param[in,out] offsets array of offsets, with valid rec_offs_n_fields() */
static
void
rec_init_offsets(
const rec_t* rec,
const dict_index_t* index,
- bool leaf,
+ ulint n_core,
rec_offs* offsets)
{
ulint i = 0;
@@ -605,6 +611,8 @@ rec_init_offsets(
|| index->in_instant_init);
ut_d(memcpy(&offsets[RECORD_OFFSET], &rec, sizeof(rec)));
ut_d(memcpy(&offsets[INDEX_OFFSET], &index, sizeof(index)));
+ ut_ad(index->n_fields >= n_core);
+ ut_ad(index->n_core_fields >= n_core);
if (dict_table_is_comp(index->table)) {
const byte* nulls;
@@ -623,23 +631,21 @@ rec_init_offsets(
rec_offs_base(offsets)[1] = 8;
return;
case REC_STATUS_NODE_PTR:
- ut_ad(!leaf);
+ ut_ad(!n_core);
n_node_ptr_field
= dict_index_get_n_unique_in_tree_nonleaf(
index);
break;
case REC_STATUS_INSTANT:
- ut_ad(leaf);
ut_ad(index->is_instant());
rec_init_offsets_comp_ordinary(rec, index, offsets,
- index->n_core_fields,
+ n_core,
NULL,
REC_LEAF_INSTANT);
return;
case REC_STATUS_ORDINARY:
- ut_ad(leaf);
rec_init_offsets_comp_ordinary(rec, index, offsets,
- index->n_core_fields,
+ n_core,
NULL,
REC_LEAF_ORDINARY);
return;
@@ -796,7 +802,7 @@ resolved:
@param[in] index the index that the record belongs to
@param[in,out] offsets array comprising offsets[0] allocated elements,
or an array from rec_get_offsets(), or NULL
-@param[in] leaf whether this is a leaf-page record
+@param[in] n_core 0, or index->n_core_fields for leaf page
@param[in] n_fields maximum number of offsets to compute
(ULINT_UNDEFINED to compute all offsets)
@param[in,out] heap memory heap
@@ -806,7 +812,7 @@ rec_get_offsets_func(
const rec_t* rec,
const dict_index_t* index,
rec_offs* offsets,
- bool leaf,
+ ulint n_core,
ulint n_fields,
#ifdef UNIV_DEBUG
const char* file, /*!< in: file name where called */
@@ -818,6 +824,15 @@ rec_get_offsets_func(
ulint size;
bool alter_metadata = false;
+ ut_ad(index->n_core_fields >= n_core);
+ /* This assertion was relaxed for the btr_cur_open_at_index_side()
+ call in btr_cur_instant_init_low(). We cannot invoke
+ index->is_instant(), because the same assertion would fail there
+ until btr_cur_instant_init_low() has invoked
+ dict_table_t::deserialise_columns(). */
+ ut_ad(index->n_fields >= index->n_core_fields
+ || index->in_instant_init);
+
if (dict_table_is_comp(index->table)) {
switch (UNIV_EXPECT(rec_get_status(rec),
REC_STATUS_ORDINARY)) {
@@ -825,14 +840,14 @@ rec_get_offsets_func(
alter_metadata = rec_is_alter_metadata(rec, true);
/* fall through */
case REC_STATUS_ORDINARY:
- ut_ad(leaf);
+ ut_ad(n_core);
n = dict_index_get_n_fields(index) + alter_metadata;
break;
case REC_STATUS_NODE_PTR:
/* Node pointer records consist of the
uniquely identifying fields of the record
followed by a child page number field. */
- ut_ad(!leaf);
+ ut_ad(!n_core);
n = dict_index_get_n_unique_in_tree_nonleaf(index) + 1;
break;
case REC_STATUS_INFIMUM:
@@ -861,19 +876,19 @@ rec_get_offsets_func(
>= PAGE_HEAP_NO_USER_LOW;
/* The infimum and supremum records carry 1 field. */
ut_ad(is_user_rec || n == 1);
- ut_ad(!is_user_rec || leaf || index->is_dummy
+ ut_ad(!is_user_rec || n_core || index->is_dummy
|| dict_index_is_ibuf(index)
|| n == n_fields /* dict_stats_analyze_index_level() */
|| n
== dict_index_get_n_unique_in_tree_nonleaf(index) + 1);
- ut_ad(!is_user_rec || !leaf || index->is_dummy
+ ut_ad(!is_user_rec || !n_core || index->is_dummy
|| dict_index_is_ibuf(index)
|| n == n_fields /* btr_pcur_restore_position() */
|| (n + (index->id == DICT_INDEXES_ID)
- >= index->n_core_fields && n <= index->n_fields
+ >= n_core && n <= index->n_fields
+ unsigned(rec_is_alter_metadata(rec, false))));
- if (is_user_rec && leaf && n < index->n_fields) {
+ if (is_user_rec && n_core && n < index->n_fields) {
ut_ad(!index->is_dummy);
ut_ad(!dict_index_is_ibuf(index));
n = index->n_fields;
@@ -907,17 +922,17 @@ rec_get_offsets_func(
memcpy(&offsets[RECORD_OFFSET], &rec, sizeof rec);
memcpy(&offsets[INDEX_OFFSET], &index, sizeof index);
#endif /* UNIV_DEBUG */
- ut_ad(leaf);
+ ut_ad(n_core);
ut_ad(index->is_dummy || index->table->instant);
ut_ad(index->is_dummy || index->is_instant());
ut_ad(rec_offs_n_fields(offsets)
<= ulint(index->n_fields) + 1);
rec_init_offsets_comp_ordinary<true>(rec, index, offsets,
index->n_core_fields,
- NULL,
+ nullptr,
REC_LEAF_INSTANT);
} else {
- rec_init_offsets(rec, index, leaf, offsets);
+ rec_init_offsets(rec, index, n_core, offsets);
}
return offsets;
}
@@ -1094,7 +1109,8 @@ rec_get_nth_field_offs_old(
}
/** Determine the size of a data tuple prefix in ROW_FORMAT=COMPACT.
-@tparam mblob whether the record includes a metadata BLOB
+@tparam mblob whether the record includes a metadata BLOB
+@tparam redundant_temp whether to use the ROW_FORMAT=REDUNDANT format
@param[in] index record descriptor; dict_table_is_comp()
is assumed to hold, even if it doesn't
@param[in] dfield array of data fields
@@ -1103,7 +1119,7 @@ rec_get_nth_field_offs_old(
@param[in] status status flags
@param[in] temp whether this is a temporary file record
@return total size */
-template<bool mblob = false>
+template<bool mblob = false, bool redundant_temp = false>
static inline
ulint
rec_get_converted_size_comp_prefix_low(
@@ -1120,25 +1136,27 @@ rec_get_converted_size_comp_prefix_low(
ut_d(ulint n_null = index->n_nullable);
ut_ad(status == REC_STATUS_ORDINARY || status == REC_STATUS_NODE_PTR
|| status == REC_STATUS_INSTANT);
+ unsigned n_core_fields = redundant_temp
+ ? row_log_get_n_core_fields(index)
+ : index->n_core_fields;
if (mblob) {
- ut_ad(!temp);
ut_ad(index->table->instant);
- ut_ad(index->is_instant());
+ ut_ad(!redundant_temp && index->is_instant());
ut_ad(status == REC_STATUS_INSTANT);
ut_ad(n_fields == ulint(index->n_fields) + 1);
extra_size += UT_BITS_IN_BYTES(index->n_nullable)
+ rec_get_n_add_field_len(n_fields - 1
- - index->n_core_fields);
+ - n_core_fields);
} else if (status == REC_STATUS_INSTANT
- && (!temp || n_fields > index->n_core_fields)) {
- ut_ad(index->is_instant());
+ && (!temp || n_fields > n_core_fields)) {
+ if (!redundant_temp) { ut_ad(index->is_instant()); }
ut_ad(UT_BITS_IN_BYTES(n_null) >= index->n_core_null_bytes);
extra_size += UT_BITS_IN_BYTES(index->get_n_nullable(n_fields))
+ rec_get_n_add_field_len(n_fields - 1
- - index->n_core_fields);
+ - n_core_fields);
} else {
- ut_ad(n_fields <= index->n_core_fields);
+ ut_ad(n_fields <= n_core_fields);
extra_size += index->n_core_null_bytes;
}
@@ -1442,8 +1460,9 @@ rec_convert_dtuple_to_rec_old(
/* If the data is not SQL null, store it */
len = dfield_get_len(field);
- memcpy(rec + end_offset,
- dfield_get_data(field), len);
+ if (len)
+ memcpy(rec + end_offset,
+ dfield_get_data(field), len);
end_offset += len;
ored_offset = end_offset;
@@ -1470,8 +1489,9 @@ rec_convert_dtuple_to_rec_old(
/* If the data is not SQL null, store it */
len = dfield_get_len(field);
- memcpy(rec + end_offset,
- dfield_get_data(field), len);
+ if (len)
+ memcpy(rec + end_offset,
+ dfield_get_data(field), len);
end_offset += len;
ored_offset = end_offset;
@@ -1489,7 +1509,8 @@ rec_convert_dtuple_to_rec_old(
}
/** Convert a data tuple into a ROW_FORMAT=COMPACT record.
-@tparam mblob whether the record includes a metadata BLOB
+@tparam mblob whether the record includes a metadata BLOB
+@tparam redundant_temp whether to use the ROW_FORMAT=REDUNDANT format
@param[out] rec converted record
@param[in] index index
@param[in] field data fields to convert
@@ -1497,7 +1518,7 @@ rec_convert_dtuple_to_rec_old(
@param[in] status rec_get_status(rec)
@param[in] temp whether to use the format for temporary files
in index creation */
-template<bool mblob = false>
+template<bool mblob = false, bool redundant_temp = false>
static inline
void
rec_convert_dtuple_to_rec_comp(
@@ -1514,7 +1535,9 @@ rec_convert_dtuple_to_rec_comp(
byte* UNINIT_VAR(lens);
ulint UNINIT_VAR(n_node_ptr_field);
ulint null_mask = 1;
-
+ const ulint n_core_fields = redundant_temp
+ ? row_log_get_n_core_fields(index)
+ : index->n_core_fields;
ut_ad(n_fields > 0);
ut_ad(temp || dict_table_is_comp(index->table));
ut_ad(index->n_core_null_bytes <= UT_BITS_IN_BYTES(index->n_nullable));
@@ -1524,11 +1547,10 @@ rec_convert_dtuple_to_rec_comp(
if (mblob) {
ut_ad(!temp);
ut_ad(index->table->instant);
- ut_ad(index->is_instant());
+ ut_ad(!redundant_temp && index->is_instant());
ut_ad(status == REC_STATUS_INSTANT);
ut_ad(n_fields == ulint(index->n_fields) + 1);
- rec_set_n_add_field(nulls, n_fields - 1
- - index->n_core_fields);
+ rec_set_n_add_field(nulls, n_fields - 1 - n_core_fields);
rec_set_heap_no_new(rec, PAGE_HEAP_NO_USER_LOW);
rec_set_status(rec, REC_STATUS_INSTANT);
n_node_ptr_field = ULINT_UNDEFINED;
@@ -1537,20 +1559,17 @@ rec_convert_dtuple_to_rec_comp(
}
switch (status) {
case REC_STATUS_INSTANT:
- ut_ad(index->is_instant());
- ut_ad(n_fields > index->n_core_fields);
- rec_set_n_add_field(nulls, n_fields - 1
- - index->n_core_fields);
+ if (!redundant_temp) { ut_ad(index->is_instant()); }
+ ut_ad(n_fields > n_core_fields);
+ rec_set_n_add_field(nulls, n_fields - 1 - n_core_fields);
/* fall through */
case REC_STATUS_ORDINARY:
ut_ad(n_fields <= dict_index_get_n_fields(index));
if (!temp) {
rec_set_heap_no_new(rec, PAGE_HEAP_NO_USER_LOW);
-
- rec_set_status(
- rec, n_fields == index->n_core_fields
- ? REC_STATUS_ORDINARY
- : REC_STATUS_INSTANT);
+ rec_set_status(rec, n_fields == n_core_fields
+ ? REC_STATUS_ORDINARY
+ : REC_STATUS_INSTANT);
}
if (dict_table_is_comp(index->table)) {
@@ -1768,12 +1787,14 @@ rec_convert_dtuple_to_rec(
}
/** Determine the size of a data tuple prefix in a temporary file.
+@tparam redundant_temp whether to use the ROW_FORMAT=REDUNDANT format
@param[in] index clustered or secondary index
@param[in] fields data fields
@param[in] n_fields number of data fields
@param[out] extra record header size
@param[in] status REC_STATUS_ORDINARY or REC_STATUS_INSTANT
@return total size, in bytes */
+template<bool redundant_temp>
ulint
rec_get_converted_size_temp(
const dict_index_t* index,
@@ -1782,10 +1803,18 @@ rec_get_converted_size_temp(
ulint* extra,
rec_comp_status_t status)
{
- return rec_get_converted_size_comp_prefix_low(
+ return rec_get_converted_size_comp_prefix_low<false,redundant_temp>(
index, fields, n_fields, extra, status, true);
}
+template ulint rec_get_converted_size_temp<false>(
+ const dict_index_t*, const dfield_t*, ulint, ulint*,
+ rec_comp_status_t);
+
+template ulint rec_get_converted_size_temp<true>(
+ const dict_index_t*, const dfield_t*, ulint, ulint*,
+ rec_comp_status_t);
+
/** Determine the offset to each field in temporary file.
@param[in] rec temporary file record
@param[in] index index of that the record belongs to
@@ -1838,6 +1867,7 @@ rec_init_offsets_temp(
@param[in] n_fields number of data fields
@param[in] status REC_STATUS_ORDINARY or REC_STATUS_INSTANT
*/
+template<bool redundant_temp>
void
rec_convert_dtuple_to_temp(
rec_t* rec,
@@ -1846,15 +1876,25 @@ rec_convert_dtuple_to_temp(
ulint n_fields,
rec_comp_status_t status)
{
- rec_convert_dtuple_to_rec_comp(rec, index, fields, n_fields,
- status, true);
+ rec_convert_dtuple_to_rec_comp<false,redundant_temp>(
+ rec, index, fields, n_fields, status, true);
}
+template void rec_convert_dtuple_to_temp<false>(
+ rec_t*, const dict_index_t*, const dfield_t*,
+ ulint, rec_comp_status_t);
+
+template void rec_convert_dtuple_to_temp<true>(
+ rec_t*, const dict_index_t*, const dfield_t*,
+ ulint, rec_comp_status_t);
+
/** Copy the first n fields of a (copy of a) physical record to a data tuple.
The fields are copied into the memory heap.
@param[out] tuple data tuple
@param[in] rec index record, or a copy thereof
-@param[in] is_leaf whether rec is a leaf page record
+@param[in] index index of rec
+@param[in] n_core index->n_core_fields at the time rec was
+ copied, or 0 if non-leaf page record
@param[in] n_fields number of fields to copy
@param[in,out] heap memory heap */
void
@@ -1862,7 +1902,7 @@ rec_copy_prefix_to_dtuple(
dtuple_t* tuple,
const rec_t* rec,
const dict_index_t* index,
- bool is_leaf,
+ ulint n_core,
ulint n_fields,
mem_heap_t* heap)
{
@@ -1870,10 +1910,11 @@ rec_copy_prefix_to_dtuple(
rec_offs* offsets = offsets_;
rec_offs_init(offsets_);
- ut_ad(is_leaf || n_fields
+ ut_ad(n_core <= index->n_core_fields);
+ ut_ad(n_core || n_fields
<= dict_index_get_n_unique_in_tree_nonleaf(index) + 1);
- offsets = rec_get_offsets(rec, index, offsets, is_leaf,
+ offsets = rec_get_offsets(rec, index, offsets, n_core,
n_fields, &heap);
ut_ad(rec_validate(rec, offsets));
@@ -2513,7 +2554,8 @@ rec_print(
rec_print_new(file, rec,
rec_get_offsets(rec, index, offsets_,
- page_rec_is_leaf(rec),
+ page_rec_is_leaf(rec)
+ ? index->n_core_fields : 0,
ULINT_UNDEFINED, &heap));
if (UNIV_LIKELY_NULL(heap)) {
mem_heap_free(heap);
@@ -2589,7 +2631,8 @@ operator<<(std::ostream& o, const rec_index_print& r)
{
mem_heap_t* heap = NULL;
rec_offs* offsets = rec_get_offsets(
- r.m_rec, r.m_index, NULL, page_rec_is_leaf(r.m_rec),
+ r.m_rec, r.m_index, NULL, page_rec_is_leaf(r.m_rec)
+ ? r.m_index->n_core_fields : 0,
ULINT_UNDEFINED, &heap);
rec_print(o, r.m_rec,
rec_get_info_bits(r.m_rec, rec_offs_comp(offsets)),
@@ -2628,7 +2671,7 @@ rec_get_trx_id(
rec_offs_init(offsets_);
rec_offs* offsets = offsets_;
- offsets = rec_get_offsets(rec, index, offsets, true,
+ offsets = rec_get_offsets(rec, index, offsets, index->n_core_fields,
index->db_trx_id() + 1, &heap);
trx_id = rec_get_nth_field(rec, offsets, index->db_trx_id(), &len);
@@ -2679,7 +2722,8 @@ wsrep_rec_get_foreign_key(
ut_ad(index_ref);
rec_offs_init(offsets_);
- offsets = rec_get_offsets(rec, index_for, offsets_, true,
+ offsets = rec_get_offsets(rec, index_for, offsets_,
+ index_for->n_core_fields,
ULINT_UNDEFINED, &heap);
ut_ad(rec_offs_validate(rec, NULL, offsets));
@@ -2761,9 +2805,24 @@ wsrep_rec_get_foreign_key(
break;
case DATA_BLOB:
case DATA_BINARY:
+ case DATA_FIXBINARY:
+ case DATA_GEOMETRY:
memcpy(buf, data, len);
break;
- default:
+
+ case DATA_FLOAT:
+ {
+ float f = mach_float_read(data);
+ memcpy(buf, &f, sizeof(float));
+ }
+ break;
+ case DATA_DOUBLE:
+ {
+ double d = mach_double_read(data);
+ memcpy(buf, &d, sizeof(double));
+ }
+ break;
+ default:
break;
}
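In the rem0rec.cc hunks above, rec_get_converted_size_temp() and rec_convert_dtuple_to_temp() gain a redundant_temp template parameter and are explicitly instantiated for <false> and <true>, so row0log.cc and row0merge.cc can pick a variant without the definitions moving into a header. A generic sketch of that explicit-instantiation pattern, with placeholder names and arithmetic rather than the InnoDB signatures:

// sizes.cc -- the definition stays in this translation unit.
#include <cstddef>

template <bool redundant_temp>
std::size_t converted_size(std::size_t n_fields, std::size_t extra) {
  // Placeholder arithmetic; the real function sizes a record prefix.
  return n_fields * (redundant_temp ? 2 : 1) + extra;
}

// Explicit instantiation definitions: exactly these two symbols are
// emitted, and callers in other .cc files link against them.
template std::size_t converted_size<false>(std::size_t, std::size_t);
template std::size_t converted_size<true>(std::size_t, std::size_t);

int main() {
  // Both instantiations are callable here as well.
  return int(converted_size<true>(3, 1)) - int(converted_size<false>(3, 4));
}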
diff --git a/storage/innobase/row/row0ftsort.cc b/storage/innobase/row/row0ftsort.cc
index f8f751fa746..ae6e6d05d80 100644
--- a/storage/innobase/row/row0ftsort.cc
+++ b/storage/innobase/row/row0ftsort.cc
@@ -925,7 +925,7 @@ loop:
<< " records, the sort queue has "
<< UT_LIST_GET_LEN(psort_info->fts_doc_list)
<< " records. But sort cannot get the next"
- " records";
+ " records during alter table " << table->name;
goto exit;
}
} else if (psort_info->state == FTS_PARENT_EXITING) {
@@ -1221,7 +1221,9 @@ row_merge_write_fts_word(
if (UNIV_UNLIKELY(error != DB_SUCCESS)) {
ib::error() << "Failed to write word to FTS auxiliary"
- " index table, error " << error;
+ " index table "
+ << ins_ctx->btr_bulk->table_name()
+ << ", error " << error;
ret = error;
}
diff --git a/storage/innobase/row/row0import.cc b/storage/innobase/row/row0import.cc
index 2468b133387..7c56713a6c1 100644
--- a/storage/innobase/row/row0import.cc
+++ b/storage/innobase/row/row0import.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 2012, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2015, 2020, MariaDB Corporation.
+Copyright (c) 2015, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -1818,7 +1818,8 @@ PageConverter::update_records(
if (deleted || clust_index) {
m_offsets = rec_get_offsets(
- rec, m_index->m_srv_index, m_offsets, true,
+ rec, m_index->m_srv_index, m_offsets,
+ m_index->m_srv_index->n_core_fields,
ULINT_UNDEFINED, &m_heap);
}
@@ -3351,6 +3352,66 @@ struct fil_iterator_t {
byte* crypt_io_buffer; /*!< IO buffer when encrypted */
};
+
+/** InnoDB writes page by page when a page_compressed
+tablespace is involved. This helps to save disk space when
+punch hole is enabled.
+@param iter Tablespace iterator
+@param full_crc32 whether the file is in the full_crc32 format
+@param write_request Request to write into the file
+@param offset offset of the file to be written
+@param writeptr buffer to be written
+@param n_bytes number of bytes to be written
+@param try_punch_only Try the range punch only because the
+ current range is full of empty pages
+@return DB_SUCCESS */
+static
+dberr_t fil_import_compress_fwrite(const fil_iterator_t &iter,
+ bool full_crc32,
+ const IORequest &write_request,
+ os_offset_t offset,
+ const byte *writeptr,
+ ulint n_bytes,
+ bool try_punch_only= false)
+{
+ if (dberr_t err= os_file_punch_hole(iter.file, offset, n_bytes))
+ return err;
+
+ if (try_punch_only)
+ return DB_SUCCESS;
+
+ for (ulint j= 0; j < n_bytes; j+= srv_page_size)
+ {
+ /* Read the original data length from the block; it is
+ safer to read FIL_PAGE_COMPRESSED_SIZE because that
+ field is not encrypted. */
+ ulint n_write_bytes= srv_page_size;
+ if (j || offset)
+ {
+ n_write_bytes= mach_read_from_2(writeptr + j + FIL_PAGE_DATA);
+ const unsigned ptype= mach_read_from_2(writeptr + j + FIL_PAGE_TYPE);
+ /* Ignore the empty page */
+ if (ptype == 0 && n_write_bytes == 0)
+ continue;
+ if (full_crc32)
+ n_write_bytes= buf_page_full_crc32_size(writeptr + j,
+ nullptr, nullptr);
+ else
+ {
+ n_write_bytes+= ptype == FIL_PAGE_PAGE_COMPRESSED_ENCRYPTED
+ ? FIL_PAGE_DATA + FIL_PAGE_ENCRYPT_COMP_METADATA_LEN
+ : FIL_PAGE_DATA + FIL_PAGE_COMP_METADATA_LEN;
+ }
+ }
+
+ if (dberr_t err= os_file_write(write_request, iter.filepath, iter.file,
+ writeptr + j, offset + j, n_write_bytes))
+ return err;
+ }
+
+ return DB_SUCCESS;
+}
+
/********************************************************************//**
TODO: This can be made parallel trivially by chunking up the file and creating
a callback per thread. The main benefit will be to use multiple CPUs for
@@ -3396,7 +3457,10 @@ fil_iterate(
/* TODO: For ROW_FORMAT=COMPRESSED tables we do a lot of useless
copying for non-index pages. Unfortunately, it is
required by buf_zip_decompress() */
- dberr_t err = DB_SUCCESS;
+ dberr_t err = DB_SUCCESS;
+ bool page_compressed = false;
+ bool punch_hole = true;
+ const IORequest write_request(IORequest::WRITE);
for (offset = iter.start; offset < iter.end; offset += n_bytes) {
if (callback.is_interrupted()) {
@@ -3474,7 +3538,7 @@ page_corrupted:
src + FIL_PAGE_SPACE_ID);
}
- const bool page_compressed =
+ page_compressed =
(full_crc32
&& fil_space_t::is_compressed(
callback.get_space_flags())
@@ -3667,13 +3731,23 @@ not_encrypted:
}
}
- /* A page was updated in the set, write back to disk. */
- if (updated) {
- IORequest write_request(IORequest::WRITE);
+ if (page_compressed && punch_hole) {
+ err = fil_import_compress_fwrite(
+ iter, full_crc32, write_request, offset,
+ writeptr, n_bytes, !updated);
- err = os_file_write(write_request,
- iter.filepath, iter.file,
- writeptr, offset, n_bytes);
+ if (err != DB_SUCCESS) {
+ punch_hole = false;
+ if (updated) {
+ goto normal_write;
+ }
+ }
+ } else if (updated) {
+ /* A page was updated in the set, write back to disk. */
+normal_write:
+ err = os_file_write(
+ write_request, iter.filepath, iter.file,
+ writeptr, offset, n_bytes);
if (err != DB_SUCCESS) {
goto func_exit;
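fil_import_compress_fwrite() above first punches a hole over the whole extent and then rewrites only the actual stored length of each page-compressed page, so the unused tail of every page stays deallocated after IMPORT TABLESPACE. A Linux-only sketch of the hole-punching primitive itself, using plain fallocate() rather than the InnoDB os_file_punch_hole() wrapper, and assuming a filesystem that supports FALLOC_FL_PUNCH_HOLE:

#include <fcntl.h>
#include <linux/falloc.h>  // FALLOC_FL_PUNCH_HOLE, FALLOC_FL_KEEP_SIZE
#include <unistd.h>
#include <cstdio>
#include <cstring>

int main() {
  const char* path = "./punch_demo.ibd";  // throw-away demo file
  int fd = open(path, O_CREAT | O_RDWR | O_TRUNC, 0600);
  if (fd < 0) { perror("open"); return 1; }

  char page[16384];  // one 16 KiB "page" of non-zero bytes
  std::memset(page, 'x', sizeof page);
  for (int i = 0; i < 4; i++)
    if (write(fd, page, sizeof page) < 0) { perror("write"); return 1; }

  // Deallocate the whole range: the file length is kept and reads of the
  // punched range return zeroes.  Pages worth keeping would then be
  // rewritten individually, which is what fil_import_compress_fwrite() does.
  if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                0, 4 * (off_t) sizeof page))
    perror("fallocate(PUNCH_HOLE)");

  close(fd);
  return 0;
}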
diff --git a/storage/innobase/row/row0ins.cc b/storage/innobase/row/row0ins.cc
index 8f3024c2d48..3d3407e3a88 100644
--- a/storage/innobase/row/row0ins.cc
+++ b/storage/innobase/row/row0ins.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2016, 2020, MariaDB Corporation.
+Copyright (c) 2016, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -883,7 +883,7 @@ row_ins_foreign_fill_virtual(
rec_offs offsets_[REC_OFFS_NORMAL_SIZE];
rec_offs_init(offsets_);
const rec_offs* offsets =
- rec_get_offsets(rec, index, offsets_, true,
+ rec_get_offsets(rec, index, offsets_, index->n_core_fields,
ULINT_UNDEFINED, &cascade->heap);
TABLE* mysql_table= NULL;
upd_t* update = cascade->update;
@@ -894,7 +894,7 @@ row_ins_foreign_fill_virtual(
update->old_vrow = row_build(
ROW_COPY_DATA, index, rec,
offsets, index->table, NULL, NULL,
- &ext, cascade->heap);
+ &ext, update->heap);
n_diff = update->n_fields;
if (index->table->vc_templ == NULL) {
@@ -1197,7 +1197,8 @@ row_ins_foreign_check_on_constraint(
if (table->fts) {
doc_id = fts_get_doc_id_from_rec(
clust_rec, clust_index,
- rec_get_offsets(clust_rec, clust_index, NULL, true,
+ rec_get_offsets(clust_rec, clust_index, NULL,
+ clust_index->n_core_fields,
ULINT_UNDEFINED, &tmp_heap));
}
@@ -1639,7 +1640,8 @@ row_ins_check_foreign_constraint(
continue;
}
- offsets = rec_get_offsets(rec, check_index, offsets, true,
+ offsets = rec_get_offsets(rec, check_index, offsets,
+ check_index->n_core_fields,
ULINT_UNDEFINED, &heap);
if (page_rec_is_supremum(rec)) {
@@ -2127,7 +2129,8 @@ row_ins_scan_sec_index_for_duplicate(
continue;
}
- offsets = rec_get_offsets(rec, index, offsets, true,
+ offsets = rec_get_offsets(rec, index, offsets,
+ index->n_core_fields,
ULINT_UNDEFINED, &offsets_heap);
if (flags & BTR_NO_LOCKING_FLAG) {
@@ -2264,7 +2267,8 @@ row_ins_duplicate_error_in_clust_online(
ut_ad(!cursor->index->is_instant());
if (cursor->low_match >= n_uniq && !page_rec_is_infimum(rec)) {
- *offsets = rec_get_offsets(rec, cursor->index, *offsets, true,
+ *offsets = rec_get_offsets(rec, cursor->index, *offsets,
+ cursor->index->n_fields,
ULINT_UNDEFINED, heap);
err = row_ins_duplicate_online(n_uniq, entry, rec, *offsets);
if (err != DB_SUCCESS) {
@@ -2275,7 +2279,8 @@ row_ins_duplicate_error_in_clust_online(
rec = page_rec_get_next_const(btr_cur_get_rec(cursor));
if (cursor->up_match >= n_uniq && !page_rec_is_supremum(rec)) {
- *offsets = rec_get_offsets(rec, cursor->index, *offsets, true,
+ *offsets = rec_get_offsets(rec, cursor->index, *offsets,
+ cursor->index->n_fields,
ULINT_UNDEFINED, heap);
err = row_ins_duplicate_online(n_uniq, entry, rec, *offsets);
}
@@ -2331,7 +2336,7 @@ row_ins_duplicate_error_in_clust(
if (!page_rec_is_infimum(rec)) {
offsets = rec_get_offsets(rec, cursor->index, offsets,
- true,
+ cursor->index->n_core_fields,
ULINT_UNDEFINED, &heap);
/* We set a lock on the possible duplicate: this
@@ -2374,6 +2379,18 @@ row_ins_duplicate_error_in_clust(
duplicate:
trx->error_info = cursor->index;
err = DB_DUPLICATE_KEY;
+ if (cursor->index->table->versioned()
+ && entry->vers_history_row())
+ {
+ ulint trx_id_len;
+ byte *trx_id = rec_get_nth_field(
+ rec, offsets, n_unique,
+ &trx_id_len);
+ ut_ad(trx_id_len == DATA_TRX_ID_LEN);
+ if (trx->id == trx_read_trx_id(trx_id)) {
+ err = DB_FOREIGN_DUPLICATE_KEY;
+ }
+ }
goto func_exit;
}
}
@@ -2385,7 +2402,7 @@ duplicate:
if (!page_rec_is_supremum(rec)) {
offsets = rec_get_offsets(rec, cursor->index, offsets,
- true,
+ cursor->index->n_core_fields,
ULINT_UNDEFINED, &heap);
if (trx->duplicates) {
@@ -2502,7 +2519,7 @@ row_ins_index_entry_big_rec(
btr_pcur_open(index, entry, PAGE_CUR_LE, BTR_MODIFY_TREE,
&pcur, &mtr);
rec = btr_pcur_get_rec(&pcur);
- offsets = rec_get_offsets(rec, index, offsets, true,
+ offsets = rec_get_offsets(rec, index, offsets, index->n_core_fields,
ULINT_UNDEFINED, heap);
DEBUG_SYNC_C_IF_THD(thd, "before_row_ins_extern");
@@ -3058,7 +3075,8 @@ row_ins_sec_index_entry_low(
prefix, we must convert the insert into a modify of an
existing record */
offsets = rec_get_offsets(
- btr_cur_get_rec(&cursor), index, offsets, true,
+ btr_cur_get_rec(&cursor), index, offsets,
+ index->n_core_fields,
ULINT_UNDEFINED, &offsets_heap);
err = row_ins_sec_index_entry_by_modify(
@@ -3314,7 +3332,8 @@ row_ins_index_entry(
dtuple_t* entry, /*!< in/out: index entry to insert */
que_thr_t* thr) /*!< in: query thread */
{
- ut_ad(thr_get_trx(thr)->id || index->table->no_rollback());
+ ut_ad(thr_get_trx(thr)->id || index->table->no_rollback()
+ || index->table->is_temporary());
DBUG_EXECUTE_IF("row_ins_index_entry_timeout", {
DBUG_SET("-d,row_ins_index_entry_timeout");
@@ -3588,6 +3607,16 @@ row_ins_get_row_from_select(
}
}
+inline
+bool ins_node_t::vers_history_row() const
+{
+ if (!table->versioned())
+ return false;
+ dfield_t* row_end = dtuple_get_nth_field(row, table->vers_end);
+ return row_end->vers_history_row();
+}
+
+
/***********************************************************//**
Inserts a row to a table.
@return DB_SUCCESS if operation successfully completed, else error
@@ -3626,12 +3655,31 @@ row_ins(
ut_ad(node->state == INS_NODE_INSERT_ENTRIES);
while (node->index != NULL) {
- if (node->index->type != DICT_FTS) {
+ dict_index_t *index = node->index;
+ /*
+ We do not insert history rows into FTS_DOC_ID_INDEX because
+ it is unique by FTS_DOC_ID only and we do not want to add
+ row_end to that unique key. Fulltext works in such a way that
+ a new FTS_DOC_ID is created on every fulltext UPDATE, so
+ keeping only the FTS_DOC_ID for history is enough.
+ */
+ const unsigned type = index->type;
+ if (index->type & DICT_FTS) {
+ } else if (!(type & DICT_UNIQUE) || index->n_uniq > 1
+ || !node->vers_history_row()) {
+
dberr_t err = row_ins_index_entry_step(node, thr);
if (err != DB_SUCCESS) {
DBUG_RETURN(err);
}
+ } else {
+ /* Unique indexes with system versioning must contain
+ the version end column. The only exception is a hidden
+ FTS_DOC_ID_INDEX that InnoDB may create on a hidden or
+ user-created FTS_DOC_ID column. */
+ ut_ad(!strcmp(index->name, FTS_DOC_ID_INDEX_NAME));
+ ut_ad(!strcmp(index->fields[0].name, FTS_DOC_ID_COL_NAME));
}
node->index = dict_table_get_next_index(node->index);
@@ -3713,13 +3761,17 @@ row_ins_step(
}
if (UNIV_LIKELY(!node->table->skip_alter_undo)) {
- trx_write_trx_id(&node->sys_buf[DATA_ROW_ID_LEN], trx->id);
+ trx_write_trx_id(&node->sys_buf[DATA_TRX_ID_LEN], trx->id);
}
if (node->state == INS_NODE_SET_IX_LOCK) {
node->state = INS_NODE_ALLOC_ROW_ID;
+ if (node->table->is_temporary()) {
+ node->trx_id = trx->id;
+ }
+
/* It may be that the current session has not yet started
its transaction, or it has been committed: */
diff --git a/storage/innobase/row/row0log.cc b/storage/innobase/row/row0log.cc
index 45ec027beb5..c0396c33cc4 100644
--- a/storage/innobase/row/row0log.cc
+++ b/storage/innobase/row/row0log.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 2011, 2018, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, 2020, MariaDB Corporation.
+Copyright (c) 2017, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -353,7 +353,7 @@ row_log_online_op(
row_merge_buf_encode(), because here we do not encode
extra_size+1 (and reserve 0 as the end-of-chunk marker). */
- size = rec_get_converted_size_temp(
+ size = rec_get_converted_size_temp<false>(
index, tuple->fields, tuple->n_fields, &extra_size);
ut_ad(size >= extra_size);
ut_ad(size <= sizeof log->tail.buf);
@@ -401,7 +401,7 @@ row_log_online_op(
*b++ = (byte) extra_size;
}
- rec_convert_dtuple_to_temp(
+ rec_convert_dtuple_to_temp<false>(
b + extra_size, index, tuple->fields, tuple->n_fields);
b += size;
@@ -743,7 +743,7 @@ row_log_table_delete(
old_pk, old_pk->n_fields - 2)->len);
ut_ad(DATA_ROLL_PTR_LEN == dtuple_get_nth_field(
old_pk, old_pk->n_fields - 1)->len);
- old_pk_size = rec_get_converted_size_temp(
+ old_pk_size = rec_get_converted_size_temp<false>(
new_index, old_pk->fields, old_pk->n_fields,
&old_pk_extra_size);
ut_ad(old_pk_extra_size < 0x100);
@@ -756,7 +756,7 @@ row_log_table_delete(
*b++ = ROW_T_DELETE;
*b++ = static_cast<byte>(old_pk_extra_size);
- rec_convert_dtuple_to_temp(
+ rec_convert_dtuple_to_temp<false>(
b + old_pk_extra_size, new_index,
old_pk->fields, old_pk->n_fields);
@@ -856,7 +856,7 @@ row_log_table_low_redundant(
rec_comp_status_t status = is_instant
? REC_STATUS_INSTANT : REC_STATUS_ORDINARY;
- size = rec_get_converted_size_temp(
+ size = rec_get_converted_size_temp<true>(
index, tuple->fields, tuple->n_fields, &extra_size, status);
if (is_instant) {
size++;
@@ -876,7 +876,7 @@ row_log_table_low_redundant(
ut_ad(DATA_ROLL_PTR_LEN == dtuple_get_nth_field(
old_pk, old_pk->n_fields - 1)->len);
- old_pk_size = rec_get_converted_size_temp(
+ old_pk_size = rec_get_converted_size_temp<false>(
new_index, old_pk->fields, old_pk->n_fields,
&old_pk_extra_size);
ut_ad(old_pk_extra_size < 0x100);
@@ -893,7 +893,7 @@ row_log_table_low_redundant(
if (old_pk_size) {
*b++ = static_cast<byte>(old_pk_extra_size);
- rec_convert_dtuple_to_temp(
+ rec_convert_dtuple_to_temp<false>(
b + old_pk_extra_size, new_index,
old_pk->fields, old_pk->n_fields);
b += old_pk_size;
@@ -916,7 +916,7 @@ row_log_table_low_redundant(
*b = status;
}
- rec_convert_dtuple_to_temp(
+ rec_convert_dtuple_to_temp<true>(
b + extra_size, index, tuple->fields, tuple->n_fields,
status);
b += size;
@@ -1038,7 +1038,7 @@ row_log_table_low(
ut_ad(DATA_ROLL_PTR_LEN == dtuple_get_nth_field(
old_pk, old_pk->n_fields - 1)->len);
- old_pk_size = rec_get_converted_size_temp(
+ old_pk_size = rec_get_converted_size_temp<false>(
new_index, old_pk->fields, old_pk->n_fields,
&old_pk_extra_size);
ut_ad(old_pk_extra_size < 0x100);
@@ -1054,7 +1054,7 @@ row_log_table_low(
if (old_pk_size) {
*b++ = static_cast<byte>(old_pk_extra_size);
- rec_convert_dtuple_to_temp(
+ rec_convert_dtuple_to_temp<false>(
b + old_pk_extra_size, new_index,
old_pk->fields, old_pk->n_fields);
b += old_pk_size;
@@ -1259,7 +1259,8 @@ row_log_table_get_pk(
if (!offsets) {
offsets = rec_get_offsets(
- rec, index, NULL, true,
+ rec, index, nullptr,
+ index->n_core_fields,
index->db_trx_id() + 1, heap);
}
@@ -1309,7 +1310,8 @@ row_log_table_get_pk(
}
if (!offsets) {
- offsets = rec_get_offsets(rec, index, NULL, true,
+ offsets = rec_get_offsets(rec, index, nullptr,
+ index->n_core_fields,
ULINT_UNDEFINED, heap);
}
@@ -1986,7 +1988,8 @@ all_done:
return(DB_SUCCESS);
}
- offsets = rec_get_offsets(btr_pcur_get_rec(&pcur), index, NULL, true,
+ offsets = rec_get_offsets(btr_pcur_get_rec(&pcur), index, nullptr,
+ index->n_core_fields,
ULINT_UNDEFINED, &offsets_heap);
#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG
ut_a(!rec_offs_any_null_extern(btr_pcur_get_rec(&pcur), offsets));
@@ -2184,7 +2187,7 @@ func_exit_committed:
/* Prepare to update (or delete) the record. */
rec_offs* cur_offsets = rec_get_offsets(
- btr_pcur_get_rec(&pcur), index, NULL, true,
+ btr_pcur_get_rec(&pcur), index, nullptr, index->n_core_fields,
ULINT_UNDEFINED, &offsets_heap);
if (!log->same_pk) {
@@ -4045,3 +4048,9 @@ row_log_apply(
DBUG_RETURN(error);
}
+
+unsigned row_log_get_n_core_fields(const dict_index_t *index)
+{
+ ut_ad(index->online_log);
+ return index->online_log->n_core_fields;
+}
diff --git a/storage/innobase/row/row0merge.cc b/storage/innobase/row/row0merge.cc
index 3939d48ea9a..475424b2155 100644
--- a/storage/innobase/row/row0merge.cc
+++ b/storage/innobase/row/row0merge.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 2005, 2017, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2014, 2020, MariaDB Corporation.
+Copyright (c) 2014, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -308,7 +308,7 @@ row_merge_buf_encode(
ulint size;
ulint extra_size;
- size = rec_get_converted_size_temp(
+ size = rec_get_converted_size_temp<false>(
index, entry->fields, n_fields, &extra_size);
ut_ad(size >= extra_size);
@@ -321,7 +321,7 @@ row_merge_buf_encode(
*(*b)++ = (byte) (extra_size + 1);
}
- rec_convert_dtuple_to_temp(*b + extra_size, index,
+ rec_convert_dtuple_to_temp<false>(*b + extra_size, index,
entry->fields, n_fields);
*b += size;
@@ -796,7 +796,7 @@ row_merge_buf_add(
ulint size;
ulint extra;
- size = rec_get_converted_size_temp(
+ size = rec_get_converted_size_temp<false>(
index, entry->fields, n_fields, &extra);
ut_ad(data_size + extra_size == size);
@@ -2037,7 +2037,8 @@ end_of_index:
rec = page_cur_get_rec(cur);
if (online) {
- offsets = rec_get_offsets(rec, clust_index, NULL, true,
+ offsets = rec_get_offsets(rec, clust_index, NULL,
+ clust_index->n_core_fields,
ULINT_UNDEFINED, &row_heap);
rec_trx_id = row_get_rec_trx_id(rec, clust_index,
offsets);
@@ -2129,7 +2130,8 @@ end_of_index:
duplicate keys. */
continue;
} else {
- offsets = rec_get_offsets(rec, clust_index, NULL, true,
+ offsets = rec_get_offsets(rec, clust_index, NULL,
+ clust_index->n_core_fields,
ULINT_UNDEFINED, &row_heap);
/* This is a locking ALTER TABLE.
@@ -3824,17 +3826,20 @@ row_merge_drop_indexes_dict(
trx->op_info = "";
}
-/*********************************************************************//**
-Drop indexes that were created before an error occurred.
+/** Drop indexes that were created before an error occurred.
The data dictionary must have been locked exclusively by the caller,
-because the transaction will not be committed. */
+because the transaction will not be committed.
+@param trx dictionary transaction
+@param table table containing the indexes
+@param locked true if the table is locked;
+ false if a lazy drop may be needed
+@param alter_trx Alter table transaction */
void
row_merge_drop_indexes(
-/*===================*/
- trx_t* trx, /*!< in/out: dictionary transaction */
- dict_table_t* table, /*!< in/out: table containing the indexes */
- ibool locked) /*!< in: TRUE=table locked,
- FALSE=may need to do a lazy drop */
+ trx_t* trx,
+ dict_table_t* table,
+ bool locked,
+ const trx_t* alter_trx)
{
dict_index_t* index;
dict_index_t* next_index;
@@ -3859,7 +3864,7 @@ row_merge_drop_indexes(
A concurrent purge will be prevented by dict_sys.latch. */
if (!locked && (table->get_ref_count() > 1
- || UT_LIST_GET_FIRST(table->locks))) {
+ || table->has_lock_other_than(alter_trx))) {
/* We will have to drop the indexes later, when the
table is guaranteed to be no longer in use. Mark the
indexes as incomplete and corrupted, so that other
@@ -3895,6 +3900,8 @@ row_merge_drop_indexes(
ut_ad(prev);
ut_a(table->fts);
fts_drop_index(table, index, trx);
+ row_merge_drop_index_dict(
+ trx, index->id);
/* We can remove a DICT_FTS
index from the cache, because
we do not allow ADD FULLTEXT INDEX
@@ -4407,6 +4414,7 @@ row_merge_create_index(
dict_index_t* index;
ulint n_fields = index_def->n_fields;
ulint i;
+ ulint n_add_vcol = 0;
DBUG_ENTER("row_merge_create_index");
@@ -4431,7 +4439,7 @@ row_merge_create_index(
ut_ad(ifield->col_no >= table->n_v_def);
name = add_v->v_col_name[
ifield->col_no - table->n_v_def];
- index->has_new_v_col = true;
+ n_add_vcol++;
} else {
name = dict_table_get_v_col_name(
table, ifield->col_no);
@@ -4443,6 +4451,10 @@ row_merge_create_index(
dict_mem_index_add_field(index, name, ifield->prefix_len);
}
+ if (n_add_vcol) {
+ index->assign_new_v_col(n_add_vcol);
+ }
+
DBUG_RETURN(index);
}
@@ -4461,7 +4473,7 @@ row_merge_is_index_usable(
}
return(!index->is_corrupted()
- && (index->table->is_temporary()
+ && (index->table->is_temporary() || index->table->no_rollback()
|| index->trx_id == 0
|| !trx->read_view.is_open()
|| trx->read_view.changes_visible(
@@ -4857,10 +4869,6 @@ wait_again:
buf, i + 1, n_indexes);
}
- DBUG_EXECUTE_IF(
- "ib_merge_wait_after_sort",
- os_thread_sleep(20000000);); /* 20 sec */
-
if (error == DB_SUCCESS) {
BtrBulk btr_bulk(sort_idx, trx,
trx->get_flush_observer());
diff --git a/storage/innobase/row/row0mysql.cc b/storage/innobase/row/row0mysql.cc
index ea0719a3fc8..a6c75f7f450 100644
--- a/storage/innobase/row/row0mysql.cc
+++ b/storage/innobase/row/row0mysql.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 2000, 2018, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2015, 2020, MariaDB Corporation.
+Copyright (c) 2015, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -689,6 +689,7 @@ row_mysql_handle_errors(
dberr_t err;
DBUG_ENTER("row_mysql_handle_errors");
+ DEBUG_SYNC_C("row_mysql_handle_errors");
handle_new_error:
err = trx->error_state;
@@ -964,9 +965,6 @@ row_create_prebuilt(
prebuilt->fts_doc_id_in_read_set = 0;
prebuilt->blob_heap = NULL;
- prebuilt->m_no_prefetch = false;
- prebuilt->m_read_virtual_key = false;
-
DBUG_RETURN(prebuilt);
}
@@ -1804,12 +1802,11 @@ row_update_for_mysql(row_prebuilt_t* prebuilt)
clust_index = dict_table_get_first_index(table);
- if (prebuilt->pcur->btr_cur.index == clust_index) {
- btr_pcur_copy_stored_position(node->pcur, prebuilt->pcur);
- } else {
- btr_pcur_copy_stored_position(node->pcur,
- prebuilt->clust_pcur);
- }
+ btr_pcur_copy_stored_position(node->pcur,
+ prebuilt->pcur->btr_cur.index
+ == clust_index
+ ? prebuilt->pcur
+ : prebuilt->clust_pcur);
ut_a(node->pcur->rel_pos == BTR_PCUR_ON);
@@ -2027,7 +2024,8 @@ row_unlock_for_mysql(
rec_offs* offsets = offsets_;
rec_offs_init(offsets_);
- offsets = rec_get_offsets(rec, index, offsets, true,
+ offsets = rec_get_offsets(rec, index, offsets,
+ index->n_core_fields,
ULINT_UNDEFINED, &heap);
rec_trx_id = row_get_rec_trx_id(rec, index, offsets);
@@ -2105,10 +2103,18 @@ row_mysql_unfreeze_data_dictionary(
@param buf Buffer to hold start time data */
void thd_get_query_start_data(THD *thd, char *buf);
-/** Function restores btr_pcur_t, creates dtuple_t from rec_t,
-sets row_end = CURRENT_TIMESTAMP/trx->id, inserts it to a table and updates
-table statistics.
-This is used in UPDATE CASCADE/SET NULL of a system versioning table.
+/** Insert history row when evaluating foreign key referential action.
+
+1. Create new dtuple_t 'row' from node->historical_row;
+2. Update its row_end to current timestamp;
+3. Insert it to a table;
+4. Update table statistics.
+
+This is used in UPDATE CASCADE/SET NULL of a system versioned referenced table.
+
+node->historical_row: dtuple_t containing pointers of row changed by referential
+action.
+
@param[in] thr current query thread
@param[in] node a node which just updated a row in a foreign table
@return DB_SUCCESS or some error */
@@ -2118,11 +2124,19 @@ static dberr_t row_update_vers_insert(que_thr_t* thr, upd_node_t* node)
dfield_t* row_end;
char row_end_data[8];
dict_table_t* table = node->table;
+ const unsigned zip_size = table->space->zip_size();
ut_ad(table->versioned());
- dtuple_t* row = node->historical_row;
- ut_ad(row);
- node->historical_row = NULL;
+ dtuple_t* row;
+ const ulint n_cols = dict_table_get_n_cols(table);
+ const ulint n_v_cols = dict_table_get_n_v_cols(table);
+
+ ut_ad(n_cols == dtuple_get_n_fields(node->historical_row));
+ ut_ad(n_v_cols == dtuple_get_n_v_fields(node->historical_row));
+
+ row = dtuple_create_with_vcol(node->historical_heap, n_cols, n_v_cols);
+
+ dict_table_copy_types(row, table);
ins_node_t* insert_node =
ins_node_create(INS_DIRECT, table, node->historical_heap);
@@ -2135,6 +2149,40 @@ static dberr_t row_update_vers_insert(que_thr_t* thr, upd_node_t* node)
insert_node->common.parent = thr;
ins_node_set_new_row(insert_node, row);
+ ut_ad(n_cols > DATA_N_SYS_COLS);
+ // Exclude DB_ROW_ID, DB_TRX_ID, DB_ROLL_PTR
+ for (ulint i = 0; i < n_cols - DATA_N_SYS_COLS; i++) {
+ dfield_t *src= dtuple_get_nth_field(node->historical_row, i);
+ dfield_t *dst= dtuple_get_nth_field(row, i);
+ dfield_copy(dst, src);
+ if (dfield_is_ext(src)) {
+ byte *field_data
+ = static_cast<byte*>(dfield_get_data(src));
+ ulint ext_len;
+ ulint field_len = dfield_get_len(src);
+
+ ut_a(field_len >= BTR_EXTERN_FIELD_REF_SIZE);
+
+ ut_a(memcmp(field_data + field_len
+ - BTR_EXTERN_FIELD_REF_SIZE,
+ field_ref_zero,
+ BTR_EXTERN_FIELD_REF_SIZE));
+
+ byte *data = btr_copy_externally_stored_field(
+ &ext_len, field_data, zip_size, field_len,
+ node->historical_heap);
+ dfield_set_data(dst, data, ext_len);
+ }
+ }
+
+ for (ulint i = 0; i < n_v_cols; i++) {
+ dfield_t *dst= dtuple_get_nth_v_field(row, i);
+ dfield_t *src= dtuple_get_nth_v_field(node->historical_row, i);
+ dfield_copy(dst, src);
+ }
+
+ node->historical_row = NULL;
+
row_end = dtuple_get_nth_field(row, table->vers_end);
if (dict_table_get_nth_col(table, table->vers_end)->vers_native()) {
mach_write_to_8(row_end_data, trx->id);
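
A minimal standalone sketch of the copy step implemented above: build the history row column by column and materialize any externally stored (off-page) value before the insert. Plain C++ stand-ins (Field, FetchExtern, copy_history_row) are used here instead of the real dfield_t/dtuple_t/btr_copy_externally_stored_field() API:

#include <cstddef>
#include <functional>
#include <string>
#include <vector>

// Simplified stand-ins for dfield_t/dtuple_t; not the real InnoDB types.
struct Field {
    std::string data;        // in-page bytes, or an external field reference
    bool        is_extern = false;
};

// Stands in for btr_copy_externally_stored_field(): resolves a reference
// to the full off-page value.
using FetchExtern = std::function<std::string(const std::string& ref)>;

// Copy the user columns of the historical row, skipping the trailing
// system columns and expanding externally stored values through `fetch`.
std::vector<Field> copy_history_row(const std::vector<Field>& historical,
                                    std::size_t n_sys_cols,
                                    const FetchExtern& fetch)
{
    std::vector<Field> row;
    for (std::size_t i = 0; i + n_sys_cols < historical.size(); i++) {
        Field dst = historical[i];
        if (dst.is_extern) {
            dst.data      = fetch(dst.data);  // materialize off-page value
            dst.is_extern = false;
        }
        row.push_back(dst);
    }
    return row;
}

The real function additionally copies the virtual columns and then overwrites row_end with the current timestamp or trx->id before handing the row to the insert node.
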
@@ -4292,6 +4340,8 @@ row_rename_table_for_mysql(
"END;\n"
, FALSE, trx);
+ ut_ad(err != DB_DUPLICATE_KEY);
+
/* SYS_TABLESPACES and SYS_DATAFILES need to be updated if
the table is in a single-table tablespace. */
if (err != DB_SUCCESS || !dict_table_is_file_per_table(table)) {
@@ -4739,7 +4789,7 @@ func_exit:
rec = buf + mach_read_from_4(buf);
- offsets = rec_get_offsets(rec, index, offsets_, true,
+ offsets = rec_get_offsets(rec, index, offsets_, index->n_core_fields,
ULINT_UNDEFINED, &heap);
if (prev_entry != NULL) {
diff --git a/storage/innobase/row/row0purge.cc b/storage/innobase/row/row0purge.cc
index e9eaf27977d..cbf4ddce279 100644
--- a/storage/innobase/row/row0purge.cc
+++ b/storage/innobase/row/row0purge.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1997, 2017, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, 2020, MariaDB Corporation.
+Copyright (c) 2017, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -125,8 +125,9 @@ row_purge_remove_clust_if_poss_low(
rec_offs offsets_[REC_OFFS_NORMAL_SIZE];
rec_offs_init(offsets_);
mem_heap_t* heap = NULL;
- rec_offs* offsets = rec_get_offsets(
- rec, index, offsets_, true, ULINT_UNDEFINED, &heap);
+ rec_offs* offsets = rec_get_offsets(rec, index, offsets_,
+ index->n_core_fields,
+ ULINT_UNDEFINED, &heap);
bool success = true;
if (node->roll_ptr != row_get_rec_roll_ptr(rec, index, offsets)) {
@@ -732,7 +733,7 @@ row_purge_skip_uncommitted_virtual_index(
not support LOCK=NONE when adding an index on newly
added virtual column.*/
while (index != NULL && dict_index_has_virtual(index)
- && !index->is_committed() && index->has_new_v_col) {
+ && !index->is_committed() && index->has_new_v_col()) {
index = dict_table_get_next_index(index);
}
}
@@ -806,7 +807,8 @@ static void row_purge_reset_trx_id(purge_node_t* node, mtr_t* mtr)
rec_offs offsets_[REC_OFFS_HEADER_SIZE + MAX_REF_PARTS + 2];
rec_offs_init(offsets_);
rec_offs* offsets = rec_get_offsets(
- rec, index, offsets_, true, trx_id_pos + 2, &heap);
+ rec, index, offsets_, index->n_core_fields,
+ trx_id_pos + 2, &heap);
ut_ad(heap == NULL);
ut_ad(dict_index_get_nth_field(index, trx_id_pos)
@@ -1364,7 +1366,7 @@ purge_node_t::validate_pcur()
dict_index_t* clust_index = pcur.btr_cur.index;
rec_offs* offsets = rec_get_offsets(
- pcur.old_rec, clust_index, NULL, true,
+ pcur.old_rec, clust_index, NULL, pcur.old_n_core_fields,
pcur.old_n_fields, &heap);
/* Here we are comparing the purge ref record and the stored initial
diff --git a/storage/innobase/row/row0row.cc b/storage/innobase/row/row0row.cc
index f37b810b7eb..f0e5385be85 100644
--- a/storage/innobase/row/row0row.cc
+++ b/storage/innobase/row/row0row.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2018, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2018, 2020, MariaDB Corporation.
+Copyright (c) 2018, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -295,12 +295,14 @@ row_build_index_entry_low(
continue;
}
+ ut_ad(!(index->type & DICT_FTS));
+
ulint len = dfield_get_len(dfield);
if (f.prefix_len == 0
&& (!dfield_is_ext(dfield)
|| dict_index_is_clust(index))) {
- /* The dfield_copy() above suffices for
+ /* The *dfield = *dfield2 above suffices for
columns that are stored in-page, or for
clustered index record columns that are not
part of a column prefix in the PRIMARY KEY. */
@@ -439,7 +441,8 @@ row_build_low(
ut_ad(!col_map || col_table);
if (!offsets) {
- offsets = rec_get_offsets(rec, index, offsets_, true,
+ offsets = rec_get_offsets(rec, index, offsets_,
+ index->n_core_fields,
ULINT_UNDEFINED, &tmp_heap);
} else {
ut_ad(rec_offs_validate(rec, index, offsets));
@@ -1003,7 +1006,7 @@ row_build_row_ref(
ut_ad(heap != NULL);
ut_ad(!dict_index_is_clust(index));
- offsets = rec_get_offsets(rec, index, offsets, true,
+ offsets = rec_get_offsets(rec, index, offsets, index->n_core_fields,
ULINT_UNDEFINED, &tmp_heap);
/* Secondary indexes must not contain externally stored columns. */
ut_ad(!rec_offs_any_extern(offsets));
@@ -1112,7 +1115,8 @@ row_build_row_ref_in_tuple(
ut_ad(clust_index);
if (!offsets) {
- offsets = rec_get_offsets(rec, index, offsets_, true,
+ offsets = rec_get_offsets(rec, index, offsets_,
+ index->n_core_fields,
ULINT_UNDEFINED, &heap);
} else {
ut_ad(rec_offs_validate(rec, index, offsets));
diff --git a/storage/innobase/row/row0sel.cc b/storage/innobase/row/row0sel.cc
index 8b03ef85ec9..74947b78774 100644
--- a/storage/innobase/row/row0sel.cc
+++ b/storage/innobase/row/row0sel.cc
@@ -2,7 +2,7 @@
Copyright (c) 1997, 2017, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2008, Google Inc.
-Copyright (c) 2015, 2020, MariaDB Corporation.
+Copyright (c) 2015, 2021, MariaDB Corporation.
Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
@@ -81,9 +81,9 @@ is alphabetically the same as the corresponding BLOB column in the clustered
index record.
NOTE: the comparison is NOT done as a binary comparison, but character
fields are compared with collation!
-@return TRUE if the columns are equal */
+@return whether the columns are equal */
static
-ibool
+bool
row_sel_sec_rec_is_for_blob(
/*========================*/
ulint mtype, /*!< in: main type */
@@ -102,19 +102,18 @@ row_sel_sec_rec_is_for_blob(
const byte* sec_field, /*!< in: column in secondary index */
ulint sec_len, /*!< in: length of sec_field */
ulint prefix_len, /*!< in: index column prefix length
- in bytes */
+ in bytes, or 0 for full column */
dict_table_t* table) /*!< in: table */
{
ulint len;
- byte buf[REC_VERSION_56_MAX_INDEX_COL_LEN];
+ byte buf[REC_VERSION_56_MAX_INDEX_COL_LEN + 1];
/* This function should never be invoked on tables in
ROW_FORMAT=REDUNDANT or ROW_FORMAT=COMPACT, because they
should always contain enough prefix in the clustered index record. */
ut_ad(dict_table_has_atomic_blobs(table));
ut_a(clust_len >= BTR_EXTERN_FIELD_REF_SIZE);
- ut_ad(prefix_len >= sec_len);
- ut_ad(prefix_len > 0);
+ ut_ad(!prefix_len || prefix_len >= sec_len);
ut_a(prefix_len <= sizeof buf);
if (!memcmp(clust_field + clust_len - BTR_EXTERN_FIELD_REF_SIZE,
@@ -123,11 +122,12 @@ row_sel_sec_rec_is_for_blob(
This record should only be seen by
recv_recovery_rollback_active() or any
TRX_ISO_READ_UNCOMMITTED transactions. */
- return(FALSE);
+ return false;
}
len = btr_copy_externally_stored_field_prefix(
- buf, prefix_len, table->space->zip_size(),
+ buf, prefix_len ? prefix_len : sizeof buf,
+ table->space->zip_size(),
clust_field, clust_len);
if (len == 0) {
@@ -136,11 +136,18 @@ row_sel_sec_rec_is_for_blob(
referring to this clustered index record, because
btr_free_externally_stored_field() is called after all
secondary index entries of the row have been purged. */
- return(FALSE);
+ return false;
}
- len = dtype_get_at_most_n_mbchars(prtype, mbminlen, mbmaxlen,
- prefix_len, len, (const char*) buf);
+ if (prefix_len) {
+ len = dtype_get_at_most_n_mbchars(prtype, mbminlen, mbmaxlen,
+ prefix_len, len,
+ reinterpret_cast<const char*>
+ (buf));
+ } else if (len >= sizeof buf) {
+ ut_ad("too long column" == 0);
+ return false;
+ }
return(!cmp_data_data(mtype, prtype, buf, len, sec_field, sec_len));
}
@@ -203,9 +210,11 @@ row_sel_sec_rec_is_for_clust_rec(
ib_vcol_row vc(heap);
clust_offs = rec_get_offsets(clust_rec, clust_index, clust_offs,
- true, ULINT_UNDEFINED, &heap);
+ clust_index->n_core_fields,
+ ULINT_UNDEFINED, &heap);
sec_offs = rec_get_offsets(sec_rec, sec_index, sec_offs,
- true, ULINT_UNDEFINED, &heap);
+ sec_index->n_fields,
+ ULINT_UNDEFINED, &heap);
n = dict_index_get_n_ordering_defined_by_user(sec_index);
@@ -215,12 +224,13 @@ row_sel_sec_rec_is_for_clust_rec(
ulint clust_pos = 0;
ulint clust_len = 0;
ulint len;
- bool is_virtual;
ifield = dict_index_get_nth_field(sec_index, i);
col = dict_field_get_col(ifield);
- is_virtual = col->is_virtual();
+ sec_field = rec_get_nth_field(sec_rec, sec_offs, i, &sec_len);
+
+ const bool is_virtual = col->is_virtual();
/* For virtual column, its value will need to be
reconstructed from base column in cluster index */
@@ -252,43 +262,55 @@ row_sel_sec_rec_is_for_clust_rec(
innobase_report_computed_value_failed(row);
return DB_COMPUTE_VALUE_FAILED;
}
- clust_len = vfield->len;
+ len = clust_len = vfield->len;
clust_field = static_cast<byte*>(vfield->data);
} else {
clust_pos = dict_col_get_clust_pos(col, clust_index);
+
clust_field = rec_get_nth_cfield(
clust_rec, clust_index, clust_offs,
clust_pos, &clust_len);
- }
-
- sec_field = rec_get_nth_field(sec_rec, sec_offs, i, &sec_len);
-
- len = clust_len;
-
- if (ifield->prefix_len > 0 && len != UNIV_SQL_NULL
- && sec_len != UNIV_SQL_NULL && !is_virtual) {
+ if (clust_len == UNIV_SQL_NULL) {
+ if (sec_len == UNIV_SQL_NULL) {
+ continue;
+ }
+ return DB_SUCCESS;
+ }
+ if (sec_len == UNIV_SQL_NULL) {
+ return DB_SUCCESS;
+ }
+ len = clust_len;
if (rec_offs_nth_extern(clust_offs, clust_pos)) {
len -= BTR_EXTERN_FIELD_REF_SIZE;
}
- len = dtype_get_at_most_n_mbchars(
- col->prtype, col->mbminlen, col->mbmaxlen,
- ifield->prefix_len, len, (char*) clust_field);
-
- if (rec_offs_nth_extern(clust_offs, clust_pos)
- && len < sec_len) {
- if (!row_sel_sec_rec_is_for_blob(
- col->mtype, col->prtype,
- col->mbminlen, col->mbmaxlen,
- clust_field, clust_len,
- sec_field, sec_len,
- ifield->prefix_len,
- clust_index->table)) {
- return DB_SUCCESS;
+ if (ulint prefix_len = ifield->prefix_len) {
+ len = dtype_get_at_most_n_mbchars(
+ col->prtype, col->mbminlen,
+ col->mbmaxlen, prefix_len, len,
+ reinterpret_cast<const char*>(
+ clust_field));
+ if (len < sec_len) {
+ goto check_for_blob;
}
+ } else {
+check_for_blob:
+ if (rec_offs_nth_extern(clust_offs,
+ clust_pos)) {
+ if (!row_sel_sec_rec_is_for_blob(
+ col->mtype, col->prtype,
+ col->mbminlen,
+ col->mbmaxlen,
+ clust_field, clust_len,
+ sec_field, sec_len,
+ prefix_len,
+ clust_index->table)) {
+ return DB_SUCCESS;
+ }
- continue;
+ continue;
+ }
}
}
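
The restructured check above handles each secondary-index column as follows: SQL NULL matches only SQL NULL, the clustered value is truncated to the column prefix when the index defines one, and the slower BLOB comparison is used only when the clustered value is stored off-page. A simplified standalone model of the NULL/prefix handling; std::string stands in for raw column bytes and byte-wise truncation approximates dtype_get_at_most_n_mbchars(), so this is an illustration, not the real comparison:

#include <cstddef>
#include <optional>
#include <string>

// True when the secondary-index column value is consistent with the
// clustered-index value; prefix_len == 0 means the full column is indexed.
bool column_matches(const std::optional<std::string>& clust,
                    const std::optional<std::string>& sec,
                    std::size_t prefix_len)
{
    if (!clust || !sec) {
        return clust.has_value() == sec.has_value();  // NULL matches only NULL
    }
    std::string c = *clust;
    if (prefix_len != 0 && c.size() > prefix_len) {
        c.resize(prefix_len);  // the index stores only a prefix
    }
    return c == *sec;
}
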
@@ -908,7 +930,9 @@ row_sel_get_clust_rec(
offsets = rec_get_offsets(rec,
btr_pcur_get_btr_cur(&plan->pcur)->index,
- offsets, true, ULINT_UNDEFINED, &heap);
+ offsets,
+ btr_pcur_get_btr_cur(&plan->pcur)->index
+ ->n_core_fields, ULINT_UNDEFINED, &heap);
row_build_row_ref_fast(plan->clust_ref, plan->clust_map, rec, offsets);
@@ -943,7 +967,8 @@ row_sel_get_clust_rec(
goto err_exit;
}
- offsets = rec_get_offsets(clust_rec, index, offsets, true,
+ offsets = rec_get_offsets(clust_rec, index, offsets,
+ index->n_core_fields,
ULINT_UNDEFINED, &heap);
if (!node->read_view) {
@@ -1163,7 +1188,8 @@ re_scan:
rec = btr_pcur_get_rec(pcur);
my_offsets = offsets_;
- my_offsets = rec_get_offsets(rec, index, my_offsets, true,
+ my_offsets = rec_get_offsets(rec, index, my_offsets,
+ index->n_fields,
ULINT_UNDEFINED, &heap);
/* No match record */
@@ -1186,7 +1212,7 @@ re_scan:
rtr_rec_t* rtr_rec = &(*it);
my_offsets = rec_get_offsets(
- rtr_rec->r_rec, index, my_offsets, true,
+ rtr_rec->r_rec, index, my_offsets, index->n_fields,
ULINT_UNDEFINED, &heap);
err = lock_sec_rec_read_check_and_lock(
@@ -1495,7 +1521,7 @@ exhausted:
rec_offs offsets_[REC_OFFS_NORMAL_SIZE];
rec_offs* offsets = offsets_;
rec_offs_init(offsets_);
- offsets = rec_get_offsets(rec, index, offsets, true,
+ offsets = rec_get_offsets(rec, index, offsets, index->n_core_fields,
ULINT_UNDEFINED, &heap);
if (dict_index_is_clust(index)) {
@@ -1711,7 +1737,7 @@ rec_loop:
trx = thr_get_trx(thr);
offsets = rec_get_offsets(next_rec, index, offsets,
- true,
+ index->n_core_fields,
ULINT_UNDEFINED, &heap);
/* If innodb_locks_unsafe_for_binlog option is used
@@ -1776,7 +1802,8 @@ skip_lock:
ulint lock_type;
trx_t* trx;
- offsets = rec_get_offsets(rec, index, offsets, true,
+ offsets = rec_get_offsets(rec, index, offsets,
+ index->n_core_fields,
ULINT_UNDEFINED, &heap);
trx = thr_get_trx(thr);
@@ -1863,7 +1890,7 @@ skip_lock:
/* PHASE 3: Get previous version in a consistent read */
cons_read_requires_clust_rec = FALSE;
- offsets = rec_get_offsets(rec, index, offsets, true,
+ offsets = rec_get_offsets(rec, index, offsets, index->n_core_fields,
ULINT_UNDEFINED, &heap);
if (consistent_read) {
@@ -1894,7 +1921,8 @@ skip_lock:
exhausted. */
offsets = rec_get_offsets(
- rec, index, offsets, true,
+ rec, index, offsets,
+ index->n_core_fields,
ULINT_UNDEFINED, &heap);
/* Fetch the columns needed in
@@ -3037,8 +3065,7 @@ static bool row_sel_store_mysql_rec(
search or virtual key read is not requested. */
if (!rec_clust
|| !prebuilt->index->has_virtual()
- || (!prebuilt->read_just_key
- && !prebuilt->m_read_virtual_key)) {
+ || !prebuilt->read_just_key) {
/* Initialize the NULL bit. */
if (templ->mysql_null_bit_mask) {
mysql_rec[templ->mysql_null_byte_offset]
@@ -3056,23 +3083,8 @@ static bool row_sel_store_mysql_rec(
const dfield_t* dfield = dtuple_get_nth_v_field(
vrow, col->v_pos);
- /* If this is a partitioned table, it might request
- InnoDB to fill out virtual column data for serach
- index key values while other non key columns are also
- getting selected. The non-key virtual columns may
- not be materialized and we should skip them. */
if (dfield_get_type(dfield)->mtype == DATA_MISSING) {
-#ifdef UNIV_DEBUG
- ulint prefix;
-#endif /* UNIV_DEBUG */
- ut_ad(prebuilt->m_read_virtual_key);
-
- /* If it is part of index key the data should
- have been materialized. */
- ut_ad(dict_index_get_nth_col_or_prefix_pos(
- prebuilt->index, col->v_pos, false,
- true, &prefix) == ULINT_UNDEFINED);
-
+ ut_ad("no ha_innopart in MariaDB" == 0);
continue;
}
@@ -3192,7 +3204,8 @@ class Row_sel_get_clust_rec_for_mysql
ut_ad(rec_offs_validate(cached_clust_rec, index, offsets));
ut_ad(index->first_user_field() <= rec_offs_n_fields(offsets));
- ut_ad(vers_offs == rec_get_offsets(cached_old_vers, index, vers_offs, true,
+ ut_ad(vers_offs == rec_get_offsets(cached_old_vers, index, vers_offs,
+ index->n_core_fields,
index->db_trx_id(), &heap));
ut_ad(!heap);
for (auto n= index->db_trx_id(); n--; )
@@ -3379,7 +3392,8 @@ Row_sel_get_clust_rec_for_mysql::operator()(
goto func_exit;
}
- *offsets = rec_get_offsets(clust_rec, clust_index, *offsets, true,
+ *offsets = rec_get_offsets(clust_rec, clust_index, *offsets,
+ clust_index->n_core_fields,
ULINT_UNDEFINED, offset_heap);
if (prebuilt->select_lock_type != LOCK_NONE) {
@@ -3453,7 +3467,8 @@ Row_sel_get_clust_rec_for_mysql::operator()(
ut_d(check_eq(clust_index, *offsets));
*offsets = rec_get_offsets(
old_vers, clust_index, *offsets,
- true, ULINT_UNDEFINED, offset_heap);
+ clust_index->n_core_fields,
+ ULINT_UNDEFINED, offset_heap);
}
}
@@ -3876,7 +3891,7 @@ exhausted:
/* This is a non-locking consistent read: if necessary, fetch
a previous version of the record */
- *offsets = rec_get_offsets(rec, index, *offsets, true,
+ *offsets = rec_get_offsets(rec, index, *offsets, index->n_core_fields,
ULINT_UNDEFINED, heap);
if (!lock_clust_rec_cons_read_sees(rec, index, *offsets,
@@ -4042,7 +4057,7 @@ row_sel_fill_vrow(
ut_ad(!index->is_instant());
ut_ad(page_rec_is_leaf(rec));
- offsets = rec_get_offsets(rec, index, offsets, true,
+ offsets = rec_get_offsets(rec, index, offsets, index->n_core_fields,
ULINT_UNDEFINED, &heap);
*vrow = dtuple_create_with_vcol(
@@ -4289,8 +4304,7 @@ row_search_mvcc(
index key, if this is covered index scan or virtual key read is
requested. */
bool need_vrow = dict_index_has_virtual(prebuilt->index)
- && (prebuilt->read_just_key
- || prebuilt->m_read_virtual_key);
+ && prebuilt->read_just_key;
/* Reset the new record lock info if srv_locks_unsafe_for_binlog
is set or session is using a READ COMMITTED isolation level. Then
@@ -4693,7 +4707,7 @@ wait_table_again:
const rec_t* next_rec = page_rec_get_next_const(rec);
offsets = rec_get_offsets(next_rec, index, offsets,
- true,
+ index->n_core_fields,
ULINT_UNDEFINED, &heap);
err = sel_set_rec_lock(pcur,
next_rec, index, offsets,
@@ -4777,7 +4791,8 @@ rec_loop:
level we do not lock gaps. Supremum record is really
a gap and therefore we do not set locks there. */
- offsets = rec_get_offsets(rec, index, offsets, true,
+ offsets = rec_get_offsets(rec, index, offsets,
+ index->n_core_fields,
ULINT_UNDEFINED, &heap);
err = sel_set_rec_lock(pcur,
rec, index, offsets,
@@ -4880,7 +4895,7 @@ wrong_offs:
ut_ad(fil_page_index_page_check(btr_pcur_get_page(pcur)));
ut_ad(btr_page_get_index_id(btr_pcur_get_page(pcur)) == index->id);
- offsets = rec_get_offsets(rec, index, offsets, true,
+ offsets = rec_get_offsets(rec, index, offsets, index->n_core_fields,
ULINT_UNDEFINED, &heap);
if (UNIV_UNLIKELY(srv_force_recovery > 0)) {
@@ -5143,7 +5158,8 @@ no_gap_lock:
Do a normal locking read. */
offsets = rec_get_offsets(
- rec, index, offsets, true,
+ rec, index, offsets,
+ index->n_core_fields,
ULINT_UNDEFINED, &heap);
goto locks_ok;
case DB_DEADLOCK:
@@ -5435,7 +5451,6 @@ use_covering_index:
if ((match_mode == ROW_SEL_EXACT
|| prebuilt->n_rows_fetched >= MYSQL_FETCH_CACHE_THRESHOLD)
&& prebuilt->select_lock_type == LOCK_NONE
- && !prebuilt->m_no_prefetch
&& !prebuilt->templ_contains_blob
&& !prebuilt->clust_index_was_generated
&& !prebuilt->used_in_HANDLER
@@ -5517,7 +5532,7 @@ use_covering_index:
/* We used 'offsets' for the clust
rec, recalculate them for 'rec' */
offsets = rec_get_offsets(rec, index, offsets,
- true,
+ index->n_core_fields,
ULINT_UNDEFINED,
&heap);
result_rec = rec;
@@ -5977,7 +5992,7 @@ row_search_autoinc_read_column(
rec_offs_init(offsets_);
ut_ad(page_rec_is_leaf(rec));
- offsets = rec_get_offsets(rec, index, offsets, true,
+ offsets = rec_get_offsets(rec, index, offsets, index->n_core_fields,
col_no + 1, &heap);
if (rec_offs_nth_sql_null(offsets, col_no)) {
diff --git a/storage/innobase/row/row0uins.cc b/storage/innobase/row/row0uins.cc
index 160de0b88a5..617fcf68c20 100644
--- a/storage/innobase/row/row0uins.cc
+++ b/storage/innobase/row/row0uins.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1997, 2017, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, 2020, MariaDB Corporation.
+Copyright (c) 2017, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -111,7 +111,8 @@ row_undo_ins_remove_clust_rec(
rec_t* rec = btr_pcur_get_rec(&node->pcur);
- ut_ad(rec_get_trx_id(rec, index) == node->trx->id);
+ ut_ad(rec_get_trx_id(rec, index) == node->trx->id
+ || node->table->is_temporary());
ut_ad(!rec_get_deleted_flag(rec, index->table->not_redundant())
|| rec_is_alter_metadata(rec, index->table->not_redundant()));
ut_ad(rec_is_metadata(rec, index->table->not_redundant())
@@ -120,7 +121,8 @@ row_undo_ins_remove_clust_rec(
if (online && dict_index_is_online_ddl(index)) {
mem_heap_t* heap = NULL;
const rec_offs* offsets = rec_get_offsets(
- rec, index, NULL, true, ULINT_UNDEFINED, &heap);
+ rec, index, NULL, index->n_core_fields,
+ ULINT_UNDEFINED, &heap);
row_log_table_delete(rec, index, offsets, NULL);
mem_heap_free(heap);
} else {
diff --git a/storage/innobase/row/row0umod.cc b/storage/innobase/row/row0umod.cc
index 3b72f173862..ae2a710d24b 100644
--- a/storage/innobase/row/row0umod.cc
+++ b/storage/innobase/row/row0umod.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1997, 2017, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, 2020, MariaDB Corporation.
+Copyright (c) 2017, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -110,7 +110,8 @@ row_undo_mod_clust_low(
ut_ad(success);
ut_ad(rec_get_trx_id(btr_cur_get_rec(btr_cur),
btr_cur_get_index(btr_cur))
- == thr_get_trx(thr)->id);
+ == thr_get_trx(thr)->id
+ || btr_cur_get_index(btr_cur)->table->is_temporary());
ut_ad(node->ref != &trx_undo_metadata
|| node->update->info_bits == REC_INFO_METADATA_ADD
|| node->update->info_bits == REC_INFO_METADATA_ALTER);
@@ -213,8 +214,9 @@ static ulint row_trx_id_offset(const rec_t* rec, const dict_index_t* index)
rec_offs_init(offsets_);
mem_heap_t* heap = NULL;
const ulint trx_id_pos = index->n_uniq ? index->n_uniq : 1;
- rec_offs* offsets = rec_get_offsets(rec, index, offsets_, true,
- trx_id_pos + 1, &heap);
+ rec_offs* offsets = rec_get_offsets(rec, index, offsets_,
+ index->n_core_fields,
+ trx_id_pos + 1, &heap);
ut_ad(!heap);
ulint len;
trx_id_offset = rec_get_nth_field_offs(
@@ -482,9 +484,9 @@ row_undo_mod_clust(
} else {
ut_ad(index->n_uniq <= MAX_REF_PARTS);
rec_offs_init(offsets_);
- offsets = rec_get_offsets(
- rec, index, offsets_, true, trx_id_pos + 2,
- &heap);
+ offsets = rec_get_offsets(rec, index, offsets_,
+ index->n_core_fields,
+ trx_id_pos + 2, &heap);
ulint len;
trx_id_offset = rec_get_nth_field_offs(
offsets, trx_id_pos, &len);
@@ -869,7 +871,8 @@ try_again:
offsets_heap = NULL;
offsets = rec_get_offsets(
btr_cur_get_rec(btr_cur),
- index, NULL, true, ULINT_UNDEFINED, &offsets_heap);
+ index, nullptr, index->n_core_fields, ULINT_UNDEFINED,
+ &offsets_heap);
update = row_upd_build_sec_rec_difference_binary(
btr_cur_get_rec(btr_cur), index, offsets, entry, heap);
if (upd_get_n_fields(update) == 0) {
diff --git a/storage/innobase/row/row0undo.cc b/storage/innobase/row/row0undo.cc
index 6cf41d2422c..11956d11b8d 100644
--- a/storage/innobase/row/row0undo.cc
+++ b/storage/innobase/row/row0undo.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1997, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, 2020, MariaDB Corporation.
+Copyright (c) 2017, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -187,7 +187,8 @@ row_undo_search_clust_to_pcur(
rec = btr_pcur_get_rec(&node->pcur);
- offsets = rec_get_offsets(rec, clust_index, offsets, true,
+ offsets = rec_get_offsets(rec, clust_index, offsets,
+ clust_index->n_core_fields,
ULINT_UNDEFINED, &heap);
found = row_get_rec_roll_ptr(rec, clust_index, offsets)
@@ -195,7 +196,7 @@ row_undo_search_clust_to_pcur(
if (found) {
ut_ad(row_get_rec_trx_id(rec, clust_index, offsets)
- == node->trx->id);
+ == node->trx->id || node->table->is_temporary());
if (dict_table_has_atomic_blobs(node->table)) {
/* There is no prefix of externally stored
diff --git a/storage/innobase/row/row0upd.cc b/storage/innobase/row/row0upd.cc
index 7c05084d4d9..b594f8a8020 100644
--- a/storage/innobase/row/row0upd.cc
+++ b/storage/innobase/row/row0upd.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2017, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2015, 2020, MariaDB Corporation.
+Copyright (c) 2015, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -126,25 +126,23 @@ NOTE that since we do not hold dict_sys.latch when leaving the
function, it may be that the referencing table has been dropped when
we leave this function: this function is only for heuristic use!
-@return TRUE if referenced */
+@return true if referenced */
static
-ibool
+bool
row_upd_index_is_referenced(
/*========================*/
dict_index_t* index, /*!< in: index */
trx_t* trx) /*!< in: transaction */
{
dict_table_t* table = index->table;
- ibool froze_data_dict = FALSE;
- ibool is_referenced = FALSE;
if (table->referenced_set.empty()) {
- return(FALSE);
+ return false;
}
- if (trx->dict_operation_lock_mode == 0) {
+ const bool froze_data_dict = !trx->dict_operation_lock_mode;
+ if (froze_data_dict) {
row_mysql_freeze_data_dictionary(trx);
- froze_data_dict = TRUE;
}
dict_foreign_set::iterator it
@@ -152,13 +150,13 @@ row_upd_index_is_referenced(
table->referenced_set.end(),
dict_foreign_with_index(index));
- is_referenced = (it != table->referenced_set.end());
+ const bool is_referenced = (it != table->referenced_set.end());
if (froze_data_dict) {
row_mysql_unfreeze_data_dictionary(trx);
}
- return(is_referenced);
+ return is_referenced;
}
#ifdef WITH_WSREP
@@ -1002,7 +1000,8 @@ row_upd_build_difference_binary(
n_diff = 0;
if (!offsets) {
- offsets = rec_get_offsets(rec, index, offsets_, true,
+ offsets = rec_get_offsets(rec, index, offsets_,
+ index->n_core_fields,
ULINT_UNDEFINED, &heap);
} else {
ut_ad(rec_offs_validate(rec, index, offsets));
@@ -2203,7 +2202,8 @@ row_upd_store_row(
rec = btr_pcur_get_rec(node->pcur);
- offsets = rec_get_offsets(rec, clust_index, offsets_, true,
+ offsets = rec_get_offsets(rec, clust_index, offsets_,
+ clust_index->n_core_fields,
ULINT_UNDEFINED, &heap);
if (dict_table_has_atomic_blobs(node->table)) {
@@ -2264,7 +2264,6 @@ row_upd_sec_index_entry(
dtuple_t* entry;
dict_index_t* index;
btr_cur_t* btr_cur;
- ibool referenced;
dberr_t err = DB_SUCCESS;
trx_t* trx = thr_get_trx(thr);
ulint mode;
@@ -2275,7 +2274,7 @@ row_upd_sec_index_entry(
index = node->index;
- referenced = row_upd_index_is_referenced(index, trx);
+ const bool referenced = row_upd_index_is_referenced(index, trx);
#ifdef WITH_WSREP
bool foreign = wsrep_row_upd_index_is_foreign(index, trx);
#endif /* WITH_WSREP */
@@ -2306,7 +2305,9 @@ row_upd_sec_index_entry(
break;
}
- if (!index->is_committed()) {
+ bool uncommitted = !index->is_committed();
+
+ if (uncommitted) {
/* The index->online_status may change if the index is
or was being created online, but not committed yet. It
is protected by index->lock. */
@@ -2435,7 +2436,7 @@ row_upd_sec_index_entry(
&& !wsrep_thd_is_BF(trx->mysql_thd, FALSE)) {
rec_offs* offsets = rec_get_offsets(
- rec, index, NULL, true,
+ rec, index, NULL, index->n_core_fields,
ULINT_UNDEFINED, &heap);
err = wsrep_row_upd_check_foreign_constraints(
@@ -2478,12 +2479,9 @@ row_upd_sec_index_entry(
ut_ad(err == DB_SUCCESS);
if (referenced) {
-
- rec_offs* offsets;
-
- offsets = rec_get_offsets(
- rec, index, NULL, true, ULINT_UNDEFINED,
- &heap);
+ rec_offs* offsets = rec_get_offsets(
+ rec, index, NULL, index->n_core_fields,
+ ULINT_UNDEFINED, &heap);
/* NOTE that the following call loses
the position of pcur ! */
@@ -2503,11 +2501,38 @@ row_upd_sec_index_entry(
mem_heap_empty(heap);
+ DEBUG_SYNC_C_IF_THD(trx->mysql_thd,
+ "before_row_upd_sec_new_index_entry");
+
+ uncommitted = !index->is_committed();
+ if (uncommitted) {
+ mtr.start();
+ /* The index->online_status may change if the index is
+ being rolled back. It is protected by index->lock. */
+
+ mtr_s_lock_index(index, &mtr);
+
+ switch (dict_index_get_online_status(index)) {
+ case ONLINE_INDEX_COMPLETE:
+ case ONLINE_INDEX_CREATION:
+ break;
+ case ONLINE_INDEX_ABORTED:
+ case ONLINE_INDEX_ABORTED_DROPPED:
+ mtr_commit(&mtr);
+ goto func_exit;
+ }
+
+ }
+
/* Build a new index entry */
entry = row_build_index_entry(node->upd_row, node->upd_ext,
index, heap);
ut_a(entry);
+ if (uncommitted) {
+ mtr_commit(&mtr);
+ }
+
/* Insert new index entry */
err = row_ins_sec_index_entry(index, entry, thr, !node->is_delete);
@@ -2653,12 +2678,13 @@ row_upd_clust_rec_by_insert(
upd_node_t* node, /*!< in/out: row update node */
dict_index_t* index, /*!< in: clustered index of the record */
que_thr_t* thr, /*!< in: query thread */
- ibool referenced,/*!< in: TRUE if index may be referenced in
+ bool referenced,/*!< in: whether index may be referenced in
a foreign key constraint */
#ifdef WITH_WSREP
bool foreign,/*!< in: whether this is a foreign key */
#endif
- mtr_t* mtr) /*!< in/out: mtr; gets committed here */
+ mtr_t* mtr) /*!< in/out: mini-transaction,
+ may be committed and restarted */
{
mem_heap_t* heap;
btr_pcur_t* pcur;
@@ -2707,7 +2733,8 @@ row_upd_clust_rec_by_insert(
we update the primary key. Delete-mark the old record
in the clustered index and prepare to insert a new entry. */
rec = btr_cur_get_rec(btr_cur);
- offsets = rec_get_offsets(rec, index, offsets, true,
+ offsets = rec_get_offsets(rec, index, offsets,
+ index->n_core_fields,
ULINT_UNDEFINED, &heap);
ut_ad(page_rec_is_user_rec(rec));
@@ -2728,10 +2755,7 @@ row_upd_clust_rec_by_insert(
btr_cur_get_block(btr_cur), rec, index, offsets,
thr, node->row, mtr);
if (err != DB_SUCCESS) {
-err_exit:
- mtr_commit(mtr);
- mem_heap_free(heap);
- return(err);
+ goto err_exit;
}
/* If the new row inherits externally stored
@@ -2791,14 +2815,14 @@ check_fk:
}
}
- mtr_commit(mtr);
+ mtr->commit();
+ mtr->start();
+ node->state = UPD_NODE_INSERT_CLUSTERED;
err = row_ins_clust_index_entry(index, entry, thr,
dtuple_get_n_ext(entry));
- node->state = UPD_NODE_INSERT_CLUSTERED;
-
+err_exit:
mem_heap_free(heap);
-
return(err);
}
@@ -2818,7 +2842,8 @@ row_upd_clust_rec(
mem_heap_t** offsets_heap,
/*!< in/out: memory heap, can be emptied */
que_thr_t* thr, /*!< in: query thread */
- mtr_t* mtr) /*!< in: mtr; gets committed here */
+ mtr_t* mtr) /*!< in,out: mini-transaction; may be
+ committed and restarted here */
{
mem_heap_t* heap = NULL;
big_rec_t* big_rec = NULL;
@@ -2864,16 +2889,15 @@ row_upd_clust_rec(
goto success;
}
- mtr_commit(mtr);
-
if (buf_LRU_buf_pool_running_out()) {
-
err = DB_LOCK_TABLE_FULL;
goto func_exit;
}
+
/* We may have to modify the tree structure: do a pessimistic descent
down the index tree */
+ mtr->commit();
mtr->start();
if (index->table->is_temporary()) {
@@ -2923,7 +2947,6 @@ success:
}
}
- mtr_commit(mtr);
func_exit:
if (heap) {
mem_heap_free(heap);
@@ -2948,17 +2971,17 @@ row_upd_del_mark_clust_rec(
rec_offs* offsets,/*!< in/out: rec_get_offsets() for the
record under the cursor */
que_thr_t* thr, /*!< in: query thread */
- ibool referenced,
- /*!< in: TRUE if index may be referenced in
+ bool referenced,
+ /*!< in: whether index may be referenced in
a foreign key constraint */
#ifdef WITH_WSREP
bool foreign,/*!< in: whether this is a foreign key */
#endif
- mtr_t* mtr) /*!< in: mtr; gets committed here */
+ mtr_t* mtr) /*!< in,out: mini-transaction;
+ will be committed and restarted */
{
btr_pcur_t* pcur;
btr_cur_t* btr_cur;
- dberr_t err;
rec_t* rec;
trx_t* trx = thr_get_trx(thr);
@@ -2974,8 +2997,7 @@ row_upd_del_mark_clust_rec(
if (!row_upd_store_row(node, trx->mysql_thd,
thr->prebuilt && thr->prebuilt->table == node->table
? thr->prebuilt->m_mysql_table : NULL)) {
- err = DB_COMPUTE_VALUE_FAILED;
- return err;
+ return DB_COMPUTE_VALUE_FAILED;
}
/* Mark the clustered index record deleted; we do not have to check
@@ -2983,7 +3005,7 @@ row_upd_del_mark_clust_rec(
rec = btr_cur_get_rec(btr_cur);
- err = btr_cur_del_mark_set_clust_rec(
+ dberr_t err = btr_cur_del_mark_set_clust_rec(
btr_cur_get_block(btr_cur), rec,
index, offsets, thr, node->row, mtr);
@@ -3020,8 +3042,6 @@ row_upd_del_mark_clust_rec(
#endif /* WITH_WSREP */
}
- mtr_commit(mtr);
-
return(err);
}
@@ -3038,14 +3058,12 @@ row_upd_clust_step(
{
dict_index_t* index;
btr_pcur_t* pcur;
- ibool success;
dberr_t err;
mtr_t mtr;
rec_t* rec;
mem_heap_t* heap = NULL;
rec_offs offsets_[REC_OFFS_NORMAL_SIZE];
rec_offs* offsets;
- ibool referenced;
ulint flags;
trx_t* trx = thr_get_trx(thr);
@@ -3053,8 +3071,7 @@ row_upd_clust_step(
index = dict_table_get_first_index(node->table);
- referenced = row_upd_index_is_referenced(index, trx);
-
+ const bool referenced = row_upd_index_is_referenced(index, trx);
#ifdef WITH_WSREP
const bool foreign = wsrep_row_upd_index_is_foreign(index, trx);
#endif
@@ -3100,14 +3117,9 @@ row_upd_clust_step(
mode = BTR_MODIFY_LEAF;
}
- success = btr_pcur_restore_position(mode, pcur, &mtr);
-
- if (!success) {
+ if (!btr_pcur_restore_position(mode, pcur, &mtr)) {
err = DB_RECORD_NOT_FOUND;
-
- mtr_commit(&mtr);
-
- return(err);
+ goto exit_func;
}
/* If this is a row in SYS_INDEXES table of the data dictionary,
@@ -3127,19 +3139,14 @@ row_upd_clust_step(
mtr.start();
index->set_modified(mtr);
- success = btr_pcur_restore_position(BTR_MODIFY_LEAF, pcur,
- &mtr);
- if (!success) {
+ if (!btr_pcur_restore_position(BTR_MODIFY_LEAF, pcur, &mtr)) {
err = DB_ERROR;
-
- mtr.commit();
-
- return(err);
+ goto exit_func;
}
}
rec = btr_pcur_get_rec(pcur);
- offsets = rec_get_offsets(rec, index, offsets_, true,
+ offsets = rec_get_offsets(rec, index, offsets_, index->n_core_fields,
ULINT_UNDEFINED, &heap);
if (!flags && !node->has_clust_rec_x_lock) {
@@ -3147,7 +3154,6 @@ row_upd_clust_step(
0, btr_pcur_get_block(pcur),
rec, index, offsets, thr);
if (err != DB_SUCCESS) {
- mtr.commit();
goto exit_func;
}
}
@@ -3158,8 +3164,6 @@ row_upd_clust_step(
btr_pcur_get_block(pcur),
page_rec_get_heap_no(rec)));
- /* NOTE: the following function calls will also commit mtr */
-
if (node->is_delete == PLAIN_DELETE) {
err = row_upd_del_mark_clust_rec(
node, index, offsets, thr, referenced,
@@ -3167,13 +3171,7 @@ row_upd_clust_step(
foreign,
#endif
&mtr);
-
- if (err == DB_SUCCESS) {
- node->state = UPD_NODE_UPDATE_ALL_SEC;
- node->index = dict_table_get_next_index(index);
- }
-
- goto exit_func;
+ goto all_done;
}
/* If the update is made for MySQL, we already have the update vector
@@ -3188,14 +3186,13 @@ row_upd_clust_step(
}
if (!node->is_delete && node->cmpl_info & UPD_NODE_NO_ORD_CHANGE) {
-
err = row_upd_clust_rec(
flags, node, index, offsets, &heap, thr, &mtr);
goto exit_func;
}
- if(!row_upd_store_row(node, trx->mysql_thd,
- thr->prebuilt ? thr->prebuilt->m_mysql_table : NULL)) {
+ if (!row_upd_store_row(node, trx->mysql_thd, thr->prebuilt
+ ? thr->prebuilt->m_mysql_table : NULL)) {
err = DB_COMPUTE_VALUE_FAILED;
goto exit_func;
}
@@ -3220,34 +3217,31 @@ row_upd_clust_step(
foreign,
#endif
&mtr);
- if (err != DB_SUCCESS) {
-
- goto exit_func;
+all_done:
+ if (err == DB_SUCCESS) {
+ node->state = UPD_NODE_UPDATE_ALL_SEC;
+success:
+ node->index = dict_table_get_next_index(index);
}
-
- node->state = UPD_NODE_UPDATE_ALL_SEC;
} else {
err = row_upd_clust_rec(
flags, node, index, offsets, &heap, thr, &mtr);
- if (err != DB_SUCCESS) {
-
- goto exit_func;
+ if (err == DB_SUCCESS) {
+ ut_ad(node->is_delete != PLAIN_DELETE);
+ node->state = node->is_delete
+ ? UPD_NODE_UPDATE_ALL_SEC
+ : UPD_NODE_UPDATE_SOME_SEC;
+ goto success;
}
-
- ut_ad(node->is_delete != PLAIN_DELETE);
- node->state = node->is_delete ?
- UPD_NODE_UPDATE_ALL_SEC :
- UPD_NODE_UPDATE_SOME_SEC;
}
- node->index = dict_table_get_next_index(index);
-
exit_func:
- if (heap) {
+ mtr.commit();
+ if (UNIV_LIKELY_NULL(heap)) {
mem_heap_free(heap);
}
- return(err);
+ return err;
}
/***********************************************************//**
diff --git a/storage/innobase/row/row0vers.cc b/storage/innobase/row/row0vers.cc
index aa2400a91ad..cde4e9e7b89 100644
--- a/storage/innobase/row/row0vers.cc
+++ b/storage/innobase/row/row0vers.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1997, 2017, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, 2020, MariaDB Corporation.
+Copyright (c) 2017, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -116,7 +116,8 @@ row_vers_impl_x_locked_low(
heap = mem_heap_create(1024);
clust_offsets = rec_get_offsets(clust_rec, clust_index, clust_offsets_,
- true, ULINT_UNDEFINED, &heap);
+ clust_index->n_core_fields,
+ ULINT_UNDEFINED, &heap);
trx_id = row_get_rec_trx_id(clust_rec, clust_index, clust_offsets);
if (trx_id == 0) {
@@ -239,7 +240,8 @@ not_locked:
}
clust_offsets = rec_get_offsets(
- prev_version, clust_index, clust_offsets_, true,
+ prev_version, clust_index, clust_offsets_,
+ clust_index->n_core_fields,
ULINT_UNDEFINED, &heap);
vers_del = rec_get_deleted_flag(prev_version, comp);
@@ -569,7 +571,8 @@ row_vers_build_cur_vrow_low(
clust_offsets = rec_get_offsets(prev_version, clust_index,
NULL,
- true, ULINT_UNDEFINED, &heap);
+ clust_index->n_core_fields,
+ ULINT_UNDEFINED, &heap);
ulint entry_len = dict_index_get_n_fields(index);
@@ -711,7 +714,8 @@ row_vers_vc_matches_cluster(
clust_offsets = rec_get_offsets(prev_version, clust_index,
NULL,
- true, ULINT_UNDEFINED, &heap);
+ clust_index->n_core_fields,
+ ULINT_UNDEFINED, &heap);
ulint entry_len = dict_index_get_n_fields(index);
@@ -849,7 +853,8 @@ row_vers_build_cur_vrow(
index, roll_ptr, trx_id, v_heap, &cur_vrow, mtr);
}
- *clust_offsets = rec_get_offsets(rec, clust_index, NULL, true,
+ *clust_offsets = rec_get_offsets(rec, clust_index, NULL,
+ clust_index->n_core_fields,
ULINT_UNDEFINED, &heap);
return(cur_vrow);
}
@@ -906,7 +911,8 @@ row_vers_old_has_index_entry(
comp = page_rec_is_comp(rec);
ut_ad(!dict_table_is_comp(index->table) == !comp);
heap = mem_heap_create(1024);
- clust_offsets = rec_get_offsets(rec, clust_index, NULL, true,
+ clust_offsets = rec_get_offsets(rec, clust_index, NULL,
+ clust_index->n_core_fields,
ULINT_UNDEFINED, &heap);
if (dict_index_has_virtual(index)) {
@@ -995,7 +1001,8 @@ row_vers_old_has_index_entry(
}
}
clust_offsets = rec_get_offsets(rec, clust_index, NULL,
- true,
+ clust_index
+ ->n_core_fields,
ULINT_UNDEFINED, &heap);
} else {
@@ -1074,7 +1081,8 @@ unsafe_to_purge:
}
clust_offsets = rec_get_offsets(prev_version, clust_index,
- NULL, true,
+ NULL,
+ clust_index->n_core_fields,
ULINT_UNDEFINED, &heap);
if (dict_index_has_virtual(index)) {
@@ -1215,7 +1223,7 @@ row_vers_build_for_consistent_read(
*offsets = rec_get_offsets(
prev_version, index, *offsets,
- true, ULINT_UNDEFINED, offset_heap);
+ index->n_core_fields, ULINT_UNDEFINED, offset_heap);
#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG
ut_a(!rec_offs_any_null_extern(prev_version, *offsets));
@@ -1331,11 +1339,10 @@ committed_version_trx:
semi-consistent read. */
version = rec;
- *offsets = rec_get_offsets(version,
- index, *offsets,
- true,
- ULINT_UNDEFINED,
- offset_heap);
+ *offsets = rec_get_offsets(
+ version, index, *offsets,
+ index->n_core_fields, ULINT_UNDEFINED,
+ offset_heap);
}
buf = static_cast<byte*>(
@@ -1378,7 +1385,8 @@ committed_version_trx:
}
version = prev_version;
- *offsets = rec_get_offsets(version, index, *offsets, true,
+ *offsets = rec_get_offsets(version, index, *offsets,
+ index->n_core_fields,
ULINT_UNDEFINED, offset_heap);
#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG
ut_a(!rec_offs_any_null_extern(version, *offsets));
diff --git a/storage/innobase/srv/srv0srv.cc b/storage/innobase/srv/srv0srv.cc
index a5305684935..d47b33ee851 100644
--- a/storage/innobase/srv/srv0srv.cc
+++ b/storage/innobase/srv/srv0srv.cc
@@ -3,7 +3,7 @@
Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2008, 2009 Google Inc.
Copyright (c) 2009, Percona Inc.
-Copyright (c) 2013, 2020, MariaDB Corporation.
+Copyright (c) 2013, 2021, MariaDB Corporation.
Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
@@ -248,9 +248,6 @@ ulong srv_buf_pool_load_pages_abort = LONG_MAX;
/** Lock table size in bytes */
ulint srv_lock_table_size = ULINT_MAX;
-/** innodb_idle_flush_pct */
-ulong srv_idle_flush_pct;
-
/** innodb_read_io_threads */
ulong srv_n_read_io_threads;
/** innodb_write_io_threads */
@@ -484,9 +481,6 @@ current_time % 5 != 0. */
#endif /* MEM_PERIODIC_CHECK */
# define SRV_MASTER_DICT_LRU_INTERVAL (47)
-/** Simulate compression failures. */
-UNIV_INTERN uint srv_simulate_comp_failures;
-
/** Buffer pool dump status frequency in percentages */
UNIV_INTERN ulong srv_buf_dump_status_frequency;
diff --git a/storage/innobase/srv/srv0start.cc b/storage/innobase/srv/srv0start.cc
index 746935f5794..46c7dc785c8 100644
--- a/storage/innobase/srv/srv0start.cc
+++ b/storage/innobase/srv/srv0start.cc
@@ -3,7 +3,7 @@
Copyright (c) 1996, 2017, Oracle and/or its affiliates. All rights reserved.
Copyright (c) 2008, Google Inc.
Copyright (c) 2009, Percona Inc.
-Copyright (c) 2013, 2020, MariaDB Corporation.
+Copyright (c) 2013, 2021, MariaDB Corporation.
Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
@@ -2005,7 +2005,7 @@ files_checked:
to the data files and truncate or delete the log.
Unless --export is specified, no further change to
InnoDB files is needed. */
- ut_ad(!srv_force_recovery);
+ ut_ad(srv_force_recovery <= SRV_FORCE_IGNORE_CORRUPT);
ut_ad(srv_n_log_files_found <= 1);
ut_ad(recv_no_log_write);
buf_flush_sync_all_buf_pools();
@@ -2546,6 +2546,19 @@ void innodb_shutdown()
sync_check_close();
+ srv_sys_space.shutdown();
+ if (srv_tmp_space.get_sanity_check_status()) {
+ if (fil_system.temp_space) {
+ fil_system.temp_space->close();
+ }
+ srv_tmp_space.delete_files();
+ }
+ srv_tmp_space.shutdown();
+
+#ifdef WITH_INNODB_DISALLOW_WRITES
+ os_event_destroy(srv_allow_writes_event);
+#endif /* WITH_INNODB_DISALLOW_WRITES */
+
if (srv_was_started && srv_print_verbose_log) {
ib::info() << "Shutdown completed; log sequence number "
<< srv_shutdown_lsn
diff --git a/storage/innobase/trx/trx0i_s.cc b/storage/innobase/trx/trx0i_s.cc
index c7613b618a1..b25476861a5 100644
--- a/storage/innobase/trx/trx0i_s.cc
+++ b/storage/innobase/trx/trx0i_s.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 2007, 2015, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, 2019, MariaDB Corporation.
+Copyright (c) 2017, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -710,7 +710,8 @@ fill_lock_data(
ut_a(n_fields > 0);
heap = NULL;
- offsets = rec_get_offsets(rec, index, offsets, true, n_fields, &heap);
+ offsets = rec_get_offsets(rec, index, offsets, index->n_core_fields,
+ n_fields, &heap);
/* format and store the data */
@@ -1258,13 +1259,16 @@ static void fetch_data_into_cache(trx_i_s_cache_t *cache)
/* Capture the state of transactions */
mutex_enter(&trx_sys.mutex);
- for (const trx_t *trx= UT_LIST_GET_FIRST(trx_sys.trx_list);
+ for (trx_t *trx= UT_LIST_GET_FIRST(trx_sys.trx_list);
trx != NULL;
trx= UT_LIST_GET_NEXT(trx_list, trx))
{
- if (trx_is_started(trx) && trx != purge_sys.query->trx)
+ if (trx->state != TRX_STATE_NOT_STARTED && trx != purge_sys.query->trx)
{
- fetch_data_into_cache_low(cache, trx);
+ mutex_enter(&trx->mutex);
+ if (trx->state != TRX_STATE_NOT_STARTED)
+ fetch_data_into_cache_low(cache, trx);
+ mutex_exit(&trx->mutex);
if (cache->is_truncated)
break;
}
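
The loop above now re-checks trx->state after taking trx->mutex, so the cache is filled only from transactions whose state cannot change underneath the reader. A generic sketch of that double-checked pattern; the names below are illustrative, not InnoDB's:

#include <atomic>
#include <mutex>

struct GuardedTrx {
    std::mutex        mtx;             // stands in for trx->mutex
    std::atomic<bool> started{false};  // stands in for the state check
};

// Cheap unlocked pre-check, then an authoritative check under the
// per-object mutex before touching the object.
template <typename Fn>
void visit_if_started(GuardedTrx& trx, Fn&& fn)
{
    if (trx.started.load(std::memory_order_relaxed)) {
        std::lock_guard<std::mutex> lock(trx.mtx);
        if (trx.started.load(std::memory_order_relaxed)) {
            fn(trx);
        }
    }
}
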
diff --git a/storage/innobase/trx/trx0rec.cc b/storage/innobase/trx/trx0rec.cc
index fee96c44479..cd520f4f5f2 100644
--- a/storage/innobase/trx/trx0rec.cc
+++ b/storage/innobase/trx/trx0rec.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2019, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, 2020, MariaDB Corporation.
+Copyright (c) 2017, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -2523,7 +2523,8 @@ trx_undo_prev_version_build(
rec_offs offsets_dbg[REC_OFFS_NORMAL_SIZE];
rec_offs_init(offsets_dbg);
ut_a(!rec_offs_any_null_extern(
- *old_vers, rec_get_offsets(*old_vers, index, offsets_dbg, true,
+ *old_vers, rec_get_offsets(*old_vers, index, offsets_dbg,
+ index->n_core_fields,
ULINT_UNDEFINED, &heap)));
#endif // defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG
diff --git a/storage/innobase/trx/trx0rseg.cc b/storage/innobase/trx/trx0rseg.cc
index 3e29ad838c9..ed1499e1392 100644
--- a/storage/innobase/trx/trx0rseg.cc
+++ b/storage/innobase/trx/trx0rseg.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, 2020, MariaDB Corporation.
+Copyright (c) 2017, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -443,8 +443,13 @@ static
void
trx_rseg_mem_restore(trx_rseg_t* rseg, trx_id_t& max_trx_id, mtr_t* mtr)
{
- trx_rsegf_t* rseg_header = trx_rsegf_get_new(
- rseg->space->id, rseg->page_no, mtr);
+ /* This is based on trx_rsegf_get_new().
+ We need to access buf_block_t. */
+ buf_block_t *block = buf_page_get(
+ page_id_t(rseg->space->id, rseg->page_no), 0, RW_S_LATCH, mtr);
+ buf_block_dbg_add_level(block, SYNC_RSEG_HEADER_NEW);
+
+ const trx_rsegf_t* rseg_header = TRX_RSEG + block->frame;
if (mach_read_from_4(rseg_header + TRX_RSEG_FORMAT) == 0) {
trx_id_t id = mach_read_from_8(rseg_header
@@ -455,32 +460,20 @@ trx_rseg_mem_restore(trx_rseg_t* rseg, trx_id_t& max_trx_id, mtr_t* mtr)
}
if (rseg_header[TRX_RSEG_BINLOG_NAME]) {
- const char* binlog_name = reinterpret_cast<const char*>
- (rseg_header) + TRX_RSEG_BINLOG_NAME;
+ lsn_t lsn = std::max(block->page.newest_modification,
+ mach_read_from_8(FIL_PAGE_LSN
+ + block->frame));
compile_time_assert(TRX_RSEG_BINLOG_NAME_LEN == sizeof
trx_sys.recovered_binlog_filename);
-
- int cmp = *trx_sys.recovered_binlog_filename
- ? strncmp(binlog_name,
- trx_sys.recovered_binlog_filename,
- TRX_RSEG_BINLOG_NAME_LEN)
- : 1;
-
- if (cmp >= 0) {
- uint64_t binlog_offset = mach_read_from_8(
- rseg_header + TRX_RSEG_BINLOG_OFFSET);
- if (cmp) {
- memcpy(trx_sys.
- recovered_binlog_filename,
- binlog_name,
- TRX_RSEG_BINLOG_NAME_LEN);
- trx_sys.recovered_binlog_offset
- = binlog_offset;
- } else if (binlog_offset >
- trx_sys.recovered_binlog_offset) {
- trx_sys.recovered_binlog_offset
- = binlog_offset;
- }
+ if (lsn > trx_sys.recovered_binlog_lsn) {
+ trx_sys.recovered_binlog_lsn = lsn;
+ trx_sys.recovered_binlog_offset
+ = mach_read_from_8(
+ rseg_header
+ + TRX_RSEG_BINLOG_OFFSET);
+ memcpy(trx_sys.recovered_binlog_filename,
+ rseg_header + TRX_RSEG_BINLOG_NAME,
+ TRX_RSEG_BINLOG_NAME_LEN);
}
#ifdef WITH_WSREP
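
The rewritten block above keeps the binlog position from whichever rollback segment header page carries the highest LSN, so the most recently written metadata wins instead of the byte-wise greatest file name. A rough standalone model of that selection; the struct below merely stands in for the TRX_RSEG_BINLOG_* fields:

#include <cstdint>
#include <string>
#include <vector>

struct RsegBinlogInfo {
    uint64_t    page_lsn;   // max(newest_modification, FIL_PAGE_LSN)
    std::string file_name;  // binlog file name stored in the page
    uint64_t    offset;     // binlog offset stored in the page
};

// The entry written under the highest page LSN is the newest one.
RsegBinlogInfo recover_binlog_position(const std::vector<RsegBinlogInfo>& rsegs)
{
    RsegBinlogInfo best{0, "", 0};
    for (const RsegBinlogInfo& r : rsegs) {
        if (r.page_lsn > best.page_lsn) {
            best = r;
        }
    }
    return best;
}
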
diff --git a/storage/innobase/trx/trx0sys.cc b/storage/innobase/trx/trx0sys.cc
index 87e85b85939..f59c1f96693 100644
--- a/storage/innobase/trx/trx0sys.cc
+++ b/storage/innobase/trx/trx0sys.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2017, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, 2020, MariaDB Corporation.
+Copyright (c) 2017, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
diff --git a/storage/innobase/trx/trx0trx.cc b/storage/innobase/trx/trx0trx.cc
index 01cb842cc0b..1bcc92f8b97 100644
--- a/storage/innobase/trx/trx0trx.cc
+++ b/storage/innobase/trx/trx0trx.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2015, 2020, MariaDB Corporation.
+Copyright (c) 2015, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -161,6 +161,11 @@ trx_init(
trx->lock.rec_cached = 0;
trx->lock.table_cached = 0;
+#ifdef WITH_WSREP
+ ut_ad(!trx->wsrep);
+ ut_ad(!trx->wsrep_event);
+ ut_ad(!trx->wsrep_UK_scan);
+#endif /* WITH_WSREP */
ut_ad(trx->get_flush_observer() == NULL);
}
@@ -369,6 +374,7 @@ trx_t *trx_create()
#ifdef WITH_WSREP
trx->wsrep_event= NULL;
+ ut_ad(!trx->wsrep_UK_scan);
#endif /* WITH_WSREP */
trx_sys.register_trx(trx);
@@ -414,9 +420,11 @@ void trx_t::free()
/* do not poison mutex */
MEM_NOACCESS(&id, sizeof id);
MEM_NOACCESS(&no, sizeof no);
- /* state is accessed by innobase_kill_connection() */
+ MEM_NOACCESS(&state, sizeof state);
MEM_NOACCESS(&is_recovered, sizeof is_recovered);
- /* wsrep is accessed by innobase_kill_connection() */
+#ifdef WITH_WSREP
+ MEM_NOACCESS(&wsrep, sizeof wsrep);
+#endif
MEM_NOACCESS(&read_view, sizeof read_view);
MEM_NOACCESS(&trx_list, sizeof trx_list);
MEM_NOACCESS(&lock, sizeof lock);
@@ -437,7 +445,7 @@ void trx_t::free()
MEM_NOACCESS(&start_time_micro, sizeof start_time_micro);
MEM_NOACCESS(&commit_lsn, sizeof commit_lsn);
MEM_NOACCESS(&table_id, sizeof table_id);
- /* mysql_thd is accessed by innobase_kill_connection() */
+ MEM_NOACCESS(&mysql_thd, sizeof mysql_thd);
MEM_NOACCESS(&mysql_log_file_name, sizeof mysql_log_file_name);
MEM_NOACCESS(&mysql_log_offset, sizeof mysql_log_offset);
MEM_NOACCESS(&n_mysql_tables_in_use, sizeof n_mysql_tables_in_use);
@@ -473,6 +481,8 @@ void trx_t::free()
MEM_NOACCESS(&flush_observer, sizeof flush_observer);
#ifdef WITH_WSREP
MEM_NOACCESS(&wsrep_event, sizeof wsrep_event);
+ ut_ad(!wsrep_UK_scan);
+ MEM_NOACCESS(&wsrep_UK_scan, sizeof wsrep_UK_scan);
#endif /* WITH_WSREP */
MEM_NOACCESS(&magic_n, sizeof magic_n);
trx_pools->mem_free(this);
@@ -1262,16 +1272,6 @@ trx_update_mod_tables_timestamp(
const time_t now = time(NULL);
trx_mod_tables_t::const_iterator end = trx->mod_tables.end();
-#ifdef UNIV_DEBUG
- const bool preserve_tables = !innodb_evict_tables_on_commit_debug
- || trx->is_recovered /* avoid trouble with XA recovery */
-# if 1 /* if dict_stats_exec_sql() were not playing dirty tricks */
- || mutex_own(&dict_sys.mutex)
-# else /* this would be more proper way to do it */
- || trx->dict_operation_lock_mode || trx->dict_operation
-# endif
- ;
-#endif
for (trx_mod_tables_t::const_iterator it = trx->mod_tables.begin();
it != end;
@@ -1287,26 +1287,6 @@ trx_update_mod_tables_timestamp(
intrusive. */
dict_table_t* table = it->first;
table->update_time = now;
-#ifdef UNIV_DEBUG
- if (preserve_tables || table->get_ref_count()
- || UT_LIST_GET_LEN(table->locks)) {
- /* do not evict when committing DDL operations
- or if some other transaction is holding the
- table handle */
- continue;
- }
- /* recheck while holding the mutex that blocks
- table->acquire() */
- mutex_enter(&dict_sys.mutex);
- mutex_enter(&lock_sys.mutex);
- const bool do_evict = !table->get_ref_count()
- && !UT_LIST_GET_LEN(table->locks);
- mutex_exit(&lock_sys.mutex);
- if (do_evict) {
- dict_sys.remove(table, true);
- }
- mutex_exit(&dict_sys.mutex);
-#endif
}
trx->mod_tables.clear();
@@ -1392,16 +1372,9 @@ inline void trx_t::commit_in_memory(const mtr_t *mtr)
so that there will be no race condition in lock_release(). */
while (UNIV_UNLIKELY(is_referenced()))
ut_delay(srv_spin_wait_delay);
- release_locks();
- id= 0;
}
else
- {
ut_ad(read_only || !rsegs.m_redo.rseg);
- release_locks();
- }
-
- DEBUG_SYNC_C("after_trx_committed_in_memory");
if (read_only || !rsegs.m_redo.rseg)
{
@@ -1414,6 +1387,10 @@ inline void trx_t::commit_in_memory(const mtr_t *mtr)
is_recovered= false;
}
+ release_locks();
+ id= 0;
+ DEBUG_SYNC_C("after_trx_committed_in_memory");
+
while (dict_table_t *table= UT_LIST_GET_FIRST(lock.evicted_tables))
{
UT_LIST_REMOVE(lock.evicted_tables, table);
diff --git a/storage/innobase/ut/ut0ut.cc b/storage/innobase/ut/ut0ut.cc
index fc2fbb7f240..a6a8661f699 100644
--- a/storage/innobase/ut/ut0ut.cc
+++ b/storage/innobase/ut/ut0ut.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1994, 2017, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, 2020, MariaDB Corporation.
+Copyright (c) 2017, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -38,6 +38,9 @@ Created 5/11/1994 Heikki Tuuri
#include <string>
#include "log.h"
#include "my_cpu.h"
+#ifndef DBUG_OFF
+#include "rem0rec.h"
+#endif
/**********************************************************//**
Returns the number of milliseconds since some epoch. The
@@ -625,4 +628,49 @@ fatal_or_error::~fatal_or_error()
} // namespace ib
+#ifndef DBUG_OFF
+static char dbug_print_buf[1024];
+
+const char * dbug_print_rec(const rec_t* rec, const rec_offs* offsets)
+{
+ rec_printer r(rec, offsets);
+ strmake(dbug_print_buf, r.str().c_str(), sizeof(dbug_print_buf) - 1);
+ return dbug_print_buf;
+}
+
+const char * dbug_print_rec(const rec_t* rec, ulint info, const rec_offs* offsets)
+{
+ rec_printer r(rec, info, offsets);
+ strmake(dbug_print_buf, r.str().c_str(), sizeof(dbug_print_buf) - 1);
+ return dbug_print_buf;
+}
+
+const char * dbug_print_rec(const dtuple_t* tuple)
+{
+ rec_printer r(tuple);
+ strmake(dbug_print_buf, r.str().c_str(), sizeof(dbug_print_buf) - 1);
+ return dbug_print_buf;
+}
+
+const char * dbug_print_rec(const dfield_t* field, ulint n)
+{
+ rec_printer r(field, n);
+ strmake(dbug_print_buf, r.str().c_str(), sizeof(dbug_print_buf) - 1);
+ return dbug_print_buf;
+}
+
+const char * dbug_print_rec(const rec_t* rec, dict_index_t* index)
+{
+ rec_offs offsets_[REC_OFFS_NORMAL_SIZE];
+ rec_offs* offsets = offsets_;
+ rec_offs_init(offsets_);
+ mem_heap_t* tmp_heap = NULL;
+ offsets = rec_get_offsets(rec, index, offsets, index->n_core_fields,
+ ULINT_UNDEFINED, &tmp_heap);
+ rec_printer r(rec, offsets);
+ strmake(dbug_print_buf, r.str().c_str(), sizeof(dbug_print_buf) - 1);
+ return dbug_print_buf;
+}
+#endif /* !DBUG_OFF */
+
#endif /* !UNIV_INNOCHECKSUM */
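
The new dbug_print_rec() overloads above all follow one shape: render the record into a fixed static buffer and return a const char* so the result can be printed straight from a debugger. A hedged standalone analogue (point and dbug_print_point are made-up names; the real helpers format InnoDB records via rec_printer and strmake):

#include <cstdio>

struct point { int x, y; };          // stand-in for a record type

static char dbug_print_buf[1024];    // shared, debug-only scratch buffer

const char *dbug_print_point(const point *p)
{
  // snprintf never overruns the buffer and always NUL-terminates,
  // mirroring the strmake() truncation used by the real helpers.
  std::snprintf(dbug_print_buf, sizeof dbug_print_buf, "(%d, %d)", p->x, p->y);
  return dbug_print_buf;
}
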
diff --git a/storage/maria/ha_maria.cc b/storage/maria/ha_maria.cc
index 1f734439ffe..aa99bc157c9 100644
--- a/storage/maria/ha_maria.cc
+++ b/storage/maria/ha_maria.cc
@@ -2523,9 +2523,6 @@ int ha_maria::info(uint flag)
MARIA_INFO maria_info;
char name_buff[FN_REFLEN];
- if (!table)
- return 0;
-
(void) maria_status(file, &maria_info, flag);
if (flag & HA_STATUS_VARIABLE)
{
diff --git a/storage/maria/ma_bitmap.c b/storage/maria/ma_bitmap.c
index fadf04861fa..4f3a2ae5f89 100644
--- a/storage/maria/ma_bitmap.c
+++ b/storage/maria/ma_bitmap.c
@@ -232,15 +232,16 @@ my_bool _ma_bitmap_init(MARIA_SHARE *share, File file,
uint max_page_size;
MARIA_FILE_BITMAP *bitmap= &share->bitmap;
uint size= share->block_size;
+ myf flag= MY_WME | (share->temporary ? MY_THREAD_SPECIFIC : 0);
pgcache_page_no_t first_bitmap_with_space;
#ifndef DBUG_OFF
/* We want to have a copy of the bitmap to be able to print differences */
size*= 2;
#endif
- if (((bitmap->map= (uchar*) my_malloc(size, MYF(MY_WME))) == NULL) ||
+ if (((bitmap->map= (uchar*) my_malloc(size, flag)) == NULL) ||
my_init_dynamic_array(&bitmap->pinned_pages,
- sizeof(MARIA_PINNED_PAGE), 1, 1, MYF(0)))
+ sizeof(MARIA_PINNED_PAGE), 1, 1, flag))
return 1;
bitmap->share= share;
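
This and the following Aria hunks repeat one pattern: compute an allocation flag once per function as MY_WME plus MY_THREAD_SPECIFIC when the share is a temporary table, then pass it to every my_malloc/my_init_dynamic_array/_ma_alloc_buffer call so temporary-table memory is charged to the creating thread. A rough sketch of the flag derivation, using stand-in flag values rather than the real myf constants:

#include <cstddef>
#include <cstdio>
#include <cstdlib>

typedef unsigned long myf_t;               // stand-in for MariaDB's myf
static const myf_t F_WME= 1;               // report allocation failures
static const myf_t F_THREAD_SPECIFIC= 2;   // charge memory to this thread

struct share_t { bool temporary; };

static void *alloc_with_flags(size_t size, myf_t flags)
{
  void *p= std::malloc(size);
  if (!p && (flags & F_WME))
    std::fprintf(stderr, "out of memory (%zu bytes)\n", size);
  // A real allocator would use F_THREAD_SPECIFIC for per-thread accounting.
  return p;
}

static void *alloc_bitmap_buffer(share_t *share, size_t size)
{
  myf_t flag= F_WME | (share->temporary ? F_THREAD_SPECIFIC : 0);
  return alloc_with_flags(size, flag);
}
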
diff --git a/storage/maria/ma_blockrec.c b/storage/maria/ma_blockrec.c
index a82637e2b82..592aab6da41 100644
--- a/storage/maria/ma_blockrec.c
+++ b/storage/maria/ma_blockrec.c
@@ -485,10 +485,11 @@ my_bool _ma_init_block_record(MARIA_HA *info)
{
MARIA_ROW *row= &info->cur_row, *new_row= &info->new_row;
MARIA_SHARE *share= info->s;
+ myf flag= MY_WME | (share->temporary ? MY_THREAD_SPECIFIC : 0);
uint default_extents;
DBUG_ENTER("_ma_init_block_record");
- if (!my_multi_malloc(MY_WME,
+ if (!my_multi_malloc(flag,
&row->empty_bits, share->base.pack_bytes,
&row->field_lengths,
share->base.max_field_lengths + 2,
@@ -527,13 +528,12 @@ my_bool _ma_init_block_record(MARIA_HA *info)
FULL_PAGE_SIZE(share) /
BLOB_SEGMENT_MIN_SIZE));
- if (my_init_dynamic_array(&info->bitmap_blocks,
- sizeof(MARIA_BITMAP_BLOCK), default_extents,
- 64, MYF(0)))
+ if (my_init_dynamic_array(&info->bitmap_blocks, sizeof(MARIA_BITMAP_BLOCK),
+ default_extents, 64, flag))
goto err;
info->cur_row.extents_buffer_length= default_extents * ROW_EXTENT_SIZE;
if (!(info->cur_row.extents= my_malloc(info->cur_row.extents_buffer_length,
- MYF(MY_WME))))
+ flag)))
goto err;
info->row_base_length= share->base_length;
@@ -2642,6 +2642,7 @@ static my_bool write_block_record(MARIA_HA *info,
LSN lsn;
my_off_t position;
uint save_my_errno;
+ myf myflag= MY_WME | (share->temporary ? MY_THREAD_SPECIFIC : 0);
DBUG_ENTER("write_block_record");
head_block= bitmap_blocks->block;
@@ -2708,7 +2709,7 @@ static my_bool write_block_record(MARIA_HA *info,
for every data segment we want to store.
*/
if (_ma_alloc_buffer(&info->rec_buff, &info->rec_buff_size,
- row->head_length))
+ row->head_length, myflag))
DBUG_RETURN(1);
tmp_data_used= 0; /* Either 0 or last used uchar in 'data' */
@@ -4718,6 +4719,7 @@ int _ma_read_block_record2(MARIA_HA *info, uchar *record,
MARIA_EXTENT_CURSOR extent;
MARIA_COLUMNDEF *column, *end_column;
MARIA_ROW *cur_row= &info->cur_row;
+ myf myflag= MY_WME | (share->temporary ? MY_THREAD_SPECIFIC : 0);
DBUG_ENTER("_ma_read_block_record2");
start_of_data= data;
@@ -4763,7 +4765,7 @@ int _ma_read_block_record2(MARIA_HA *info, uchar *record,
if (cur_row->extents_buffer_length < row_extent_size &&
_ma_alloc_buffer(&cur_row->extents,
&cur_row->extents_buffer_length,
- row_extent_size))
+ row_extent_size, myflag))
DBUG_RETURN(my_errno);
memcpy(cur_row->extents, data, ROW_EXTENT_SIZE);
data+= ROW_EXTENT_SIZE;
@@ -4944,7 +4946,7 @@ int _ma_read_block_record2(MARIA_HA *info, uchar *record,
cur_row->blob_length= blob_lengths;
DBUG_PRINT("info", ("Total blob length: %lu", blob_lengths));
if (_ma_alloc_buffer(&info->blob_buff, &info->blob_buff_size,
- blob_lengths))
+ blob_lengths, myflag))
DBUG_RETURN(my_errno);
blob_buffer= info->blob_buff;
}
@@ -5050,6 +5052,7 @@ static my_bool read_row_extent_info(MARIA_HA *info, uchar *buff,
uint flag, row_extents, row_extents_size;
uint field_lengths __attribute__ ((unused));
uchar *extents, *end;
+ myf myflag= MY_WME | (share->temporary ? MY_THREAD_SPECIFIC : 0);
DBUG_ENTER("read_row_extent_info");
if (!(data= get_record_position(share, buff,
@@ -5073,7 +5076,7 @@ static my_bool read_row_extent_info(MARIA_HA *info, uchar *buff,
if (info->cur_row.extents_buffer_length < row_extents_size &&
_ma_alloc_buffer(&info->cur_row.extents,
&info->cur_row.extents_buffer_length,
- row_extents_size))
+ row_extents_size, myflag))
DBUG_RETURN(1);
memcpy(info->cur_row.extents, data, ROW_EXTENT_SIZE);
data+= ROW_EXTENT_SIZE;
@@ -5244,6 +5247,7 @@ my_bool _ma_cmp_block_unique(MARIA_HA *info, MARIA_UNIQUEDEF *def,
my_bool _ma_scan_init_block_record(MARIA_HA *info)
{
MARIA_SHARE *share= info->s;
+ myf flag= MY_WME | (share->temporary ? MY_THREAD_SPECIFIC : 0);
DBUG_ENTER("_ma_scan_init_block_record");
DBUG_ASSERT(info->dfile.file == share->bitmap.file.file);
@@ -5253,7 +5257,7 @@ my_bool _ma_scan_init_block_record(MARIA_HA *info)
*/
if (!(info->scan.bitmap_buff ||
((info->scan.bitmap_buff=
- (uchar *) my_malloc(share->block_size * 2, MYF(MY_WME))))))
+ (uchar *) my_malloc(share->block_size * 2, flag)))))
DBUG_RETURN(1);
info->scan.page_buff= info->scan.bitmap_buff + share->block_size;
info->scan.bitmap_end= info->scan.bitmap_buff + share->bitmap.max_total_size;
diff --git a/storage/maria/ma_check.c b/storage/maria/ma_check.c
index 26a9241c205..e1722d7f2a8 100644
--- a/storage/maria/ma_check.c
+++ b/storage/maria/ma_check.c
@@ -1271,6 +1271,7 @@ static int check_dynamic_record(HA_CHECK *param, MARIA_HA *info, int extend,
ulong UNINIT_VAR(left_length);
uint b_type;
char llbuff[22],llbuff2[22],llbuff3[22];
+ myf myflag= MY_WME | (share->temporary ? MY_THREAD_SPECIFIC : 0);
DBUG_ENTER("check_dynamic_record");
pos= 0;
@@ -1378,7 +1379,7 @@ static int check_dynamic_record(HA_CHECK *param, MARIA_HA *info, int extend,
{
if (_ma_alloc_buffer(&info->rec_buff, &info->rec_buff_size,
block_info.rec_len +
- share->base.extra_rec_buff_size))
+ share->base.extra_rec_buff_size, myflag))
{
_ma_check_print_error(param,
@@ -2694,7 +2695,7 @@ int maria_repair(HA_CHECK *param, register MARIA_HA *info,
(uchar *) my_malloc((uint)
share->base.default_rec_buff_size, MYF(0))) ||
_ma_alloc_buffer(&sort_param.rec_buff, &sort_param.rec_buff_size,
- share->base.default_rec_buff_size))
+ share->base.default_rec_buff_size, MYF(0)))
{
_ma_check_print_error(param, "Not enough memory for extra record");
goto err;
@@ -3782,7 +3783,7 @@ int maria_repair_by_sort(HA_CHECK *param, register MARIA_HA *info,
(uchar*) my_malloc((size_t) share->base.default_rec_buff_size,
MYF(0))) ||
_ma_alloc_buffer(&sort_param.rec_buff, &sort_param.rec_buff_size,
- share->base.default_rec_buff_size))
+ share->base.default_rec_buff_size, MYF(0)))
{
_ma_check_print_error(param, "Not enough memory for extra record");
goto err;
@@ -4425,7 +4426,7 @@ int maria_repair_parallel(HA_CHECK *param, register MARIA_HA *info,
sort_param[i].record= (((uchar *)(sort_param+share->base.keys))+
(share->base.pack_reclength * i));
if (_ma_alloc_buffer(&sort_param[i].rec_buff, &sort_param[i].rec_buff_size,
- share->base.default_rec_buff_size))
+ share->base.default_rec_buff_size, MYF(0)))
{
_ma_check_print_error(param,"Not enough memory!");
goto err;
@@ -5155,7 +5156,7 @@ static int sort_get_next_record(MARIA_SORT_PARAM *sort_param)
if (_ma_alloc_buffer(&sort_param->rec_buff,
&sort_param->rec_buff_size,
block_info.rec_len +
- share->base.extra_rec_buff_size))
+ share->base.extra_rec_buff_size, MYF(0)))
{
if (param->max_record_length >= block_info.rec_len)
diff --git a/storage/maria/ma_create.c b/storage/maria/ma_create.c
index ba147f08b45..85e158a9fd6 100644
--- a/storage/maria/ma_create.c
+++ b/storage/maria/ma_create.c
@@ -64,10 +64,10 @@ int maria_create(const char *name, enum data_file_type datafile_type,
uint uniques, MARIA_UNIQUEDEF *uniquedefs,
MARIA_CREATE_INFO *ci,uint flags)
{
- register uint i,j;
+ uint i,j;
File UNINIT_VAR(dfile), UNINIT_VAR(file);
int errpos,save_errno, create_mode= O_RDWR | O_TRUNC, res;
- myf create_flag;
+ myf create_flag, common_flag= MY_WME, sync_dir= 0;
uint length,max_key_length,packed,pack_bytes,pointer,real_length_diff,
key_length,info_length,key_segs,options,min_key_length,
base_pos,long_varchar_count,
@@ -93,7 +93,6 @@ int maria_create(const char *name, enum data_file_type datafile_type,
MARIA_CREATE_INFO tmp_create_info;
my_bool tmp_table= FALSE; /* cache for presence of HA_OPTION_TMP_TABLE */
my_bool forced_packed;
- myf sync_dir= 0;
uchar *log_data= NULL;
my_bool encrypted= maria_encrypt_tables && datafile_type == BLOCK_RECORD;
my_bool insert_order= MY_TEST(flags & HA_PRESERVE_INSERT_ORDER);
@@ -104,6 +103,9 @@ int maria_create(const char *name, enum data_file_type datafile_type,
DBUG_ASSERT(maria_inited);
+ if (flags & HA_CREATE_TMP_TABLE)
+ common_flag|= MY_THREAD_SPECIFIC;
+
if (!ci)
{
bzero((char*) &tmp_create_info,sizeof(tmp_create_info));
@@ -148,7 +150,7 @@ int maria_create(const char *name, enum data_file_type datafile_type,
(double*) my_malloc((keys + uniques)*HA_MAX_KEY_SEG*sizeof(double) +
(keys + uniques)*HA_MAX_KEY_SEG*sizeof(ulong) +
sizeof(uint16) * columns,
- MYF(MY_WME | MY_ZEROFILL))))
+ MYF(common_flag | MY_ZEROFILL))))
DBUG_RETURN(my_errno);
nulls_per_key_part= (ulong*) (rec_per_key_part +
(keys + uniques) * HA_MAX_KEY_SEG);
@@ -924,7 +926,7 @@ int maria_create(const char *name, enum data_file_type datafile_type,
if ((file= mysql_file_create_with_symlink(key_file_kfile, klinkname_ptr,
kfilename, 0, create_mode,
- MYF(MY_WME|create_flag))) < 0)
+ MYF(common_flag|create_flag))) < 0)
goto err;
errpos=1;
@@ -1027,7 +1029,7 @@ int maria_create(const char *name, enum data_file_type datafile_type,
MARIA_COLUMNDEF **col_order, **pos;
if (!(col_order= (MARIA_COLUMNDEF**) my_malloc(share.base.fields *
sizeof(MARIA_COLUMNDEF*),
- MYF(MY_WME))))
+ common_flag)))
goto err;
for (column= columndef, pos= col_order ;
column != end_column ;
@@ -1206,8 +1208,8 @@ int maria_create(const char *name, enum data_file_type datafile_type,
}
if ((dfile=
mysql_file_create_with_symlink(key_file_dfile, dlinkname_ptr,
- dfilename, 0, create_mode,
- MYF(MY_WME | create_flag | sync_dir))) < 0)
+ dfilename, 0, create_mode,
+ MYF(common_flag | create_flag | sync_dir))) < 0)
goto err;
errpos=3;
diff --git a/storage/maria/ma_crypt.c b/storage/maria/ma_crypt.c
index 95b84d38221..564edb4bbe8 100644
--- a/storage/maria/ma_crypt.c
+++ b/storage/maria/ma_crypt.c
@@ -268,7 +268,7 @@ static my_bool ma_crypt_data_pre_write_hook(PAGECACHE_IO_HOOK_ARGS *args)
return 1;
}
- if (!share->now_transactional)
+ if (!share->base.born_transactional)
{
/* store a random number instead of LSN (for counter block) */
store_rand_lsn(args->page);
@@ -392,7 +392,7 @@ static my_bool ma_crypt_index_pre_write_hook(PAGECACHE_IO_HOOK_ARGS *args)
return 1;
}
- if (!share->now_transactional)
+ if (!share->base.born_transactional)
{
/* store a random number instead of LSN (for counter block) */
store_rand_lsn(args->page);
diff --git a/storage/maria/ma_dynrec.c b/storage/maria/ma_dynrec.c
index ae6fc57c114..829e5b5cd02 100644
--- a/storage/maria/ma_dynrec.c
+++ b/storage/maria/ma_dynrec.c
@@ -1477,6 +1477,8 @@ int _ma_read_dynamic_record(MARIA_HA *info, uchar *buf,
File file;
uchar *UNINIT_VAR(to);
uint UNINIT_VAR(left_length);
+ MARIA_SHARE *share= info->s;
+ myf flag= MY_WME | (share->temporary ? MY_THREAD_SPECIFIC : 0);
DBUG_ENTER("_ma_read_dynamic_record");
if (filepos == HA_OFFSET_ERROR)
@@ -1507,13 +1509,13 @@ int _ma_read_dynamic_record(MARIA_HA *info, uchar *buf,
if (block_of_record++ == 0) /* First block */
{
info->cur_row.total_length= block_info.rec_len;
- if (block_info.rec_len > (uint) info->s->base.max_pack_length)
+ if (block_info.rec_len > (uint) share->base.max_pack_length)
goto panic;
- if (info->s->base.blobs)
+ if (share->base.blobs)
{
if (_ma_alloc_buffer(&info->rec_buff, &info->rec_buff_size,
block_info.rec_len +
- info->s->base.extra_rec_buff_size))
+ share->base.extra_rec_buff_size, flag))
goto err;
}
to= info->rec_buff;
@@ -1549,7 +1551,7 @@ int _ma_read_dynamic_record(MARIA_HA *info, uchar *buf,
there is no equivalent without seeking. We are at the right
position already. :(
*/
- if (info->s->file_read(info, to, block_info.data_len,
+ if (share->file_read(info, to, block_info.data_len,
filepos, MYF(MY_NABP)))
goto panic;
left_length-=block_info.data_len;
@@ -1769,6 +1771,7 @@ int _ma_read_rnd_dynamic_record(MARIA_HA *info,
uchar *UNINIT_VAR(to);
MARIA_BLOCK_INFO block_info;
MARIA_SHARE *share= info->s;
+ myf flag= MY_WME | (share->temporary ? MY_THREAD_SPECIFIC : 0);
DBUG_ENTER("_ma_read_rnd_dynamic_record");
#ifdef MARIA_EXTERNAL_LOCKING
@@ -1859,7 +1862,7 @@ int _ma_read_rnd_dynamic_record(MARIA_HA *info,
{
if (_ma_alloc_buffer(&info->rec_buff, &info->rec_buff_size,
block_info.rec_len +
- info->s->base.extra_rec_buff_size))
+ share->base.extra_rec_buff_size, flag))
goto err;
}
to= info->rec_buff;
diff --git a/storage/maria/ma_extra.c b/storage/maria/ma_extra.c
index b464cf4f94e..2573133ece5 100644
--- a/storage/maria/ma_extra.c
+++ b/storage/maria/ma_extra.c
@@ -533,6 +533,7 @@ int maria_reset(MARIA_HA *info)
{
int error= 0;
MARIA_SHARE *share= info->s;
+ myf flag= MY_WME | (share->temporary ? MY_THREAD_SPECIFIC : 0);
DBUG_ENTER("maria_reset");
/*
Free buffers and reset the following flags:
@@ -553,13 +554,13 @@ int maria_reset(MARIA_HA *info)
{
info->rec_buff_size= 1; /* Force realloc */
_ma_alloc_buffer(&info->rec_buff, &info->rec_buff_size,
- share->base.default_rec_buff_size);
+ share->base.default_rec_buff_size, flag);
}
if (info->blob_buff_size > MARIA_SMALL_BLOB_BUFFER)
{
info->blob_buff_size= 1; /* Force realloc */
_ma_alloc_buffer(&info->blob_buff, &info->blob_buff_size,
- MARIA_SMALL_BLOB_BUFFER);
+ MARIA_SMALL_BLOB_BUFFER, flag);
}
}
#if defined(HAVE_MMAP) && defined(HAVE_MADVISE)
diff --git a/storage/maria/ma_open.c b/storage/maria/ma_open.c
index 89848fae03e..06183c72895 100644
--- a/storage/maria/ma_open.c
+++ b/storage/maria/ma_open.c
@@ -39,7 +39,7 @@ static void maria_scan_end_dummy(MARIA_HA *info);
static my_bool maria_once_init_dummy(MARIA_SHARE *, File);
static my_bool maria_once_end_dummy(MARIA_SHARE *);
static uchar *_ma_base_info_read(uchar *ptr, MARIA_BASE_INFO *base);
-static uchar *_ma_state_info_read(uchar *ptr, MARIA_STATE_INFO *state);
+static uchar *_ma_state_info_read(uchar *, MARIA_STATE_INFO *, myf);
#define get_next_element(to,pos,size) { memcpy((char*) to,pos,(size_t) size); \
pos+=size;}
@@ -98,6 +98,7 @@ static MARIA_HA *maria_clone_internal(MARIA_SHARE *share,
uint errpos;
MARIA_HA info,*m_info;
my_bitmap_map *changed_fields_bitmap;
+ myf flag= MY_WME | (share->temporary ? MY_THREAD_SPECIFIC : 0);
DBUG_ENTER("maria_clone_internal");
errpos= 0;
@@ -115,7 +116,7 @@ static MARIA_HA *maria_clone_internal(MARIA_SHARE *share,
errpos= 5;
/* alloc and set up private structure parts */
- if (!my_multi_malloc(MY_WME,
+ if (!my_multi_malloc(flag,
&m_info,sizeof(MARIA_HA),
&info.blobs,sizeof(MARIA_BLOB)*share->base.blobs,
&info.buff,(share->base.max_key_block_length*2+
@@ -163,10 +164,9 @@ static MARIA_HA *maria_clone_internal(MARIA_SHARE *share,
goto err;
/* The following should be big enough for all pinning purposes */
- if (my_init_dynamic_array(&info.pinned_pages,
- sizeof(MARIA_PINNED_PAGE),
+ if (my_init_dynamic_array(&info.pinned_pages, sizeof(MARIA_PINNED_PAGE),
MY_MAX(share->base.blobs*2 + 4,
- MARIA_MAX_TREE_LEVELS*3), 16, MYF(0)))
+ MARIA_MAX_TREE_LEVELS*3), 16, flag))
goto err;
@@ -202,7 +202,7 @@ static MARIA_HA *maria_clone_internal(MARIA_SHARE *share,
/* Allocate buffer for one record */
/* prerequisites: info->rec_buffer == 0 && info->rec_buff_size == 0 */
if (_ma_alloc_buffer(&info.rec_buff, &info.rec_buff_size,
- share->base.default_rec_buff_size))
+ share->base.default_rec_buff_size, flag))
goto err;
bzero(info.rec_buff, share->base.default_rec_buff_size);
@@ -265,6 +265,7 @@ MARIA_HA *maria_open(const char *name, int mode, uint open_flags)
uint i,j,len,errpos,head_length,base_pos,keys, realpath_err,
key_parts,base_key_parts,unique_key_parts,fulltext_keys,uniques;
uint internal_table= MY_TEST(open_flags & HA_OPEN_INTERNAL_TABLE);
+ myf common_flag= open_flags & HA_OPEN_TMP_TABLE ? MY_THREAD_SPECIFIC : 0;
uint file_version;
size_t info_length;
char name_buff[FN_REFLEN], org_name[FN_REFLEN], index_name[FN_REFLEN],
@@ -322,13 +323,13 @@ MARIA_HA *maria_open(const char *name, int mode, uint open_flags)
DEBUG_SYNC_C("mi_open_kfile");
if ((kfile=mysql_file_open(key_file_kfile, name_buff,
(open_mode=O_RDWR) | O_SHARE | O_NOFOLLOW | O_CLOEXEC,
- MYF(MY_NOSYMLINKS))) < 0)
+ MYF(common_flag | MY_NOSYMLINKS))) < 0)
{
if ((errno != EROFS && errno != EACCES) ||
mode != O_RDONLY ||
(kfile=mysql_file_open(key_file_kfile, name_buff,
(open_mode=O_RDONLY) | O_SHARE | O_NOFOLLOW | O_CLOEXEC,
- MYF(MY_NOSYMLINKS))) < 0)
+ MYF(common_flag | MY_NOSYMLINKS))) < 0)
goto err;
}
share->mode=open_mode;
@@ -393,7 +394,7 @@ MARIA_HA *maria_open(const char *name, int mode, uint open_flags)
Allocate space for header information and for data that is too
big to keep on stack
*/
- if (!(disk_cache= my_malloc(info_length+128, MYF(MY_WME))))
+ if (!(disk_cache= my_malloc(info_length+128, MYF(MY_WME | common_flag))))
{
my_errno=ENOMEM;
goto err;
@@ -420,7 +421,7 @@ MARIA_HA *maria_open(const char *name, int mode, uint open_flags)
}
share->state_diff_length=len-MARIA_STATE_INFO_SIZE;
- if (!_ma_state_info_read(disk_cache, &share->state))
+ if (!_ma_state_info_read(disk_cache, &share->state, common_flag))
goto err;
len= mi_uint2korr(share->state.header.base_info_length);
if (len != MARIA_BASE_INFO_SIZE)
@@ -561,12 +562,10 @@ MARIA_HA *maria_open(const char *name, int mode, uint open_flags)
share->index_file_name.length= strlen(index_name);
share->data_file_name.length= strlen(data_name);
share->open_file_name.length= strlen(name);
- if (!my_multi_malloc(MY_WME,
+ if (!my_multi_malloc(MYF(MY_WME | common_flag),
&share,sizeof(*share),
- &rec_per_key_part,
- sizeof(double) * key_parts,
- &nulls_per_key_part,
- sizeof(long)* key_parts,
+ &rec_per_key_part, sizeof(double) * key_parts,
+ &nulls_per_key_part, sizeof(long)* key_parts,
&share->keyinfo,keys*sizeof(MARIA_KEYDEF),
&share->uniqueinfo,uniques*sizeof(MARIA_UNIQUEDEF),
&share->keyparts,
@@ -883,9 +882,9 @@ MARIA_HA *maria_open(const char *name, int mode, uint open_flags)
share->options|= HA_OPTION_READ_ONLY_DATA;
share->is_log_table= FALSE;
- if (open_flags & HA_OPEN_TMP_TABLE ||
- (share->options & HA_OPTION_TMP_TABLE))
+ if (open_flags & HA_OPEN_TMP_TABLE || share->options & HA_OPTION_TMP_TABLE)
{
+ common_flag|= MY_THREAD_SPECIFIC;
share->options|= HA_OPTION_TMP_TABLE;
share->temporary= share->delay_key_write= 1;
share->write_flag=MYF(MY_NABP);
@@ -1113,13 +1112,13 @@ err:
*/
my_bool _ma_alloc_buffer(uchar **old_addr, size_t *old_size,
- size_t new_size)
+ size_t new_size, myf flag)
{
if (*old_size < new_size)
{
uchar *addr;
if (!(addr= (uchar*) my_realloc(*old_addr, new_size,
- MYF(MY_ALLOW_ZERO_PTR))))
+ MYF(MY_ALLOW_ZERO_PTR | flag))))
return 1;
*old_addr= addr;
*old_size= new_size;
@@ -1498,7 +1497,7 @@ uint _ma_state_info_write_sub(File file, MARIA_STATE_INFO *state, uint pWrite)
}
-static uchar *_ma_state_info_read(uchar *ptr, MARIA_STATE_INFO *state)
+static uchar *_ma_state_info_read(uchar *ptr, MARIA_STATE_INFO *state, myf flag)
{
uint i,keys,key_parts;
DBUG_ENTER("_ma_state_info_read");
@@ -1510,7 +1509,7 @@ static uchar *_ma_state_info_read(uchar *ptr, MARIA_STATE_INFO *state)
/* Allocate memory for key parts if not already done */
if (!state->rec_per_key_part &&
- !my_multi_malloc(MY_WME,
+ !my_multi_malloc(MYF(MY_WME | flag),
&state->rec_per_key_part,
sizeof(*state->rec_per_key_part) * key_parts,
&state->nulls_per_key_part,
@@ -1955,6 +1954,8 @@ void _ma_set_index_pagecache_callbacks(PAGECACHE_FILE *file,
int _ma_open_datafile(MARIA_HA *info, MARIA_SHARE *share)
{
myf flags= MY_WME | (share->mode & O_NOFOLLOW ? MY_NOSYMLINKS : 0);
+ if (share->temporary)
+ flags|= MY_THREAD_SPECIFIC;
DEBUG_SYNC_C("mi_open_datafile");
info->dfile.file= share->bitmap.file.file=
mysql_file_open(key_file_dfile, share->data_file_name.str,
diff --git a/storage/maria/ma_packrec.c b/storage/maria/ma_packrec.c
index e42e9300d14..d1c30a57146 100644
--- a/storage/maria/ma_packrec.c
+++ b/storage/maria/ma_packrec.c
@@ -1413,10 +1413,12 @@ uint _ma_pack_get_block_info(MARIA_HA *maria, MARIA_BIT_BUFF *bit_buff,
{
uchar *header= info->header;
uint head_length,UNINIT_VAR(ref_length);
+ MARIA_SHARE *share= maria->s;
+ myf flag= MY_WME | (share->temporary ? MY_THREAD_SPECIFIC : 0);
if (file >= 0)
{
- ref_length=maria->s->pack.ref_length;
+ ref_length=share->pack.ref_length;
/*
We can't use my_pread() here because _ma_read_rnd_pack_record assumes
position is ok
@@ -1426,11 +1428,11 @@ uint _ma_pack_get_block_info(MARIA_HA *maria, MARIA_BIT_BUFF *bit_buff,
return BLOCK_FATAL_ERROR;
DBUG_DUMP("header", header, ref_length);
}
- head_length= read_pack_length((uint) maria->s->pack.version, header,
+ head_length= read_pack_length((uint) share->pack.version, header,
&info->rec_len);
- if (maria->s->base.blobs)
+ if (share->base.blobs)
{
- head_length+= read_pack_length((uint) maria->s->pack.version,
+ head_length+= read_pack_length((uint) share->pack.version,
header + head_length, &info->blob_len);
/*
Ensure that the record buffer is big enough for the compressed
@@ -1439,7 +1441,7 @@ uint _ma_pack_get_block_info(MARIA_HA *maria, MARIA_BIT_BUFF *bit_buff,
*/
if (_ma_alloc_buffer(rec_buff_p, rec_buff_size_p,
info->rec_len + info->blob_len +
- maria->s->base.extra_rec_buff_size))
+ share->base.extra_rec_buff_size, flag))
return BLOCK_FATAL_ERROR; /* not enough memory */
bit_buff->blob_pos= *rec_buff_p + info->rec_len;
bit_buff->blob_end= bit_buff->blob_pos + info->blob_len;
@@ -1580,15 +1582,18 @@ _ma_mempack_get_block_info(MARIA_HA *maria,
size_t *rec_buff_size_p,
uchar *header)
{
- header+= read_pack_length((uint) maria->s->pack.version, header,
+ MARIA_SHARE *share= maria->s;
+ myf flag= MY_WME | (share->temporary ? MY_THREAD_SPECIFIC : 0);
+
+ header+= read_pack_length((uint) share->pack.version, header,
&info->rec_len);
- if (maria->s->base.blobs)
+ if (share->base.blobs)
{
- header+= read_pack_length((uint) maria->s->pack.version, header,
+ header+= read_pack_length((uint) share->pack.version, header,
&info->blob_len);
/* _ma_alloc_rec_buff sets my_errno on error */
if (_ma_alloc_buffer(rec_buff_p, rec_buff_size_p,
- info->blob_len + maria->s->base.extra_rec_buff_size))
+ info->blob_len + share->base.extra_rec_buff_size, flag))
return 0; /* not enough memory */
bit_buff->blob_pos= *rec_buff_p;
bit_buff->blob_end= *rec_buff_p + info->blob_len;
diff --git a/storage/maria/ma_recovery.c b/storage/maria/ma_recovery.c
index 8f108e3f03f..be8a9fe8b2a 100644
--- a/storage/maria/ma_recovery.c
+++ b/storage/maria/ma_recovery.c
@@ -1453,6 +1453,7 @@ static int new_table(uint16 sid, const char *name, LSN lsn_of_file_id)
}
if (maria_is_crashed(info))
{
+ tprint(tracef, "\n");
eprint(tracef, "Table '%s' is crashed, skipping it. Please repair it with"
" aria_chk -r", share->open_file_name.str);
recovery_found_crashed_tables++;
diff --git a/storage/maria/maria_def.h b/storage/maria/maria_def.h
index c89623daea9..6c9649ede45 100644
--- a/storage/maria/maria_def.h
+++ b/storage/maria/maria_def.h
@@ -1198,7 +1198,7 @@ extern my_bool _ma_read_cache(MARIA_HA *, IO_CACHE *info, uchar *buff,
uint re_read_if_possibly);
extern ulonglong ma_retrieve_auto_increment(const uchar *key, uint8 key_type);
extern my_bool _ma_alloc_buffer(uchar **old_addr, size_t *old_size,
- size_t new_size);
+ size_t new_size, myf flag);
extern size_t _ma_rec_unpack(MARIA_HA *info, uchar *to, uchar *from,
size_t reclength);
extern my_bool _ma_rec_check(MARIA_HA *info, const uchar *record,
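
_ma_alloc_buffer() now takes the caller's myf flags as well; its job is otherwise unchanged: grow a reusable buffer only when the requested size exceeds the current one. A rough standalone sketch of that grow-only idiom (grow_buffer is a made-up name; the real function goes through my_realloc with MY_ALLOW_ZERO_PTR):

#include <cstddef>
#include <cstdlib>

// Returns true on failure, like the my_bool convention in the code above.
static bool grow_buffer(unsigned char **addr, size_t *size, size_t new_size)
{
  if (*size >= new_size)
    return false;                          // current buffer is big enough
  void *p= std::realloc(*addr, new_size);  // realloc(NULL, n) acts as malloc
  if (!p)
    return true;
  *addr= static_cast<unsigned char*>(p);
  *size= new_size;
  return false;
}
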
diff --git a/storage/mroonga/ha_mroonga.cpp b/storage/mroonga/ha_mroonga.cpp
index 8638399717e..fdca803ad96 100644
--- a/storage/mroonga/ha_mroonga.cpp
+++ b/storage/mroonga/ha_mroonga.cpp
@@ -550,9 +550,6 @@ static const char *mrn_inspect_extra_function(enum ha_extra_function operation)
case HA_EXTRA_END_ALTER_COPY:
inspected = "HA_EXTRA_END_ALTER_COPY";
break;
- case HA_EXTRA_FAKE_START_STMT:
- inspected = "HA_EXTRA_FAKE_START_STMT";
- break;
#ifdef MRN_HAVE_HA_EXTRA_EXPORT
case HA_EXTRA_EXPORT:
inspected = "HA_EXTRA_EXPORT";
@@ -5919,7 +5916,7 @@ int ha_mroonga::wrapper_write_row_index(const uchar *buf)
DBUG_RETURN(0);
}
- mrn::DebugColumnAccess debug_column_access(table, table->read_set);
+ mrn::DebugColumnAccess debug_column_access(table, &table->read_set);
uint i;
uint n_keys = table->s->keys;
for (i = 0; i < n_keys; i++) {
@@ -5994,7 +5991,7 @@ int ha_mroonga::storage_write_row(const uchar *buf)
DBUG_RETURN(error);
}
- mrn::DebugColumnAccess debug_column_access(table, table->read_set);
+ mrn::DebugColumnAccess debug_column_access(table, &table->read_set);
for (i = 0; i < n_columns; i++) {
Field *field = table->field[i];
@@ -6275,7 +6272,7 @@ int ha_mroonga::storage_write_row_multiple_column_indexes(const uchar *buf,
int error = 0;
- mrn::DebugColumnAccess debug_column_access(table, table->read_set);
+ mrn::DebugColumnAccess debug_column_access(table, &table->read_set);
uint i;
uint n_keys = table->s->keys;
for (i = 0; i < n_keys; i++) {
@@ -6569,7 +6566,7 @@ int ha_mroonga::wrapper_update_row_index(const uchar *old_data,
DBUG_RETURN(0);
}
- mrn::DebugColumnAccess debug_column_access(table, table->read_set);
+ mrn::DebugColumnAccess debug_column_access(table, &table->read_set);
uint i;
uint n_keys = table->s->keys;
for (i = 0; i < n_keys; i++) {
@@ -6690,7 +6687,7 @@ int ha_mroonga::storage_update_row(const uchar *old_data,
grn_obj new_value;
GRN_VOID_INIT(&new_value);
{
- mrn::DebugColumnAccess debug_column_access(table, table->read_set);
+ mrn::DebugColumnAccess debug_column_access(table, &table->read_set);
generic_store_bulk(field, &new_value);
}
grn_obj casted_value;
@@ -6719,7 +6716,7 @@ int ha_mroonga::storage_update_row(const uchar *old_data,
storage_store_fields_for_prep_update(old_data, new_data, record_id);
{
mrn::Lock lock(&(share->record_mutex), have_unique_index());
- mrn::DebugColumnAccess debug_column_access(table, table->read_set);
+ mrn::DebugColumnAccess debug_column_access(table, &table->read_set);
if ((error = storage_prepare_delete_row_unique_indexes(old_data,
record_id))) {
DBUG_RETURN(error);
@@ -6744,7 +6741,7 @@ int ha_mroonga::storage_update_row(const uchar *old_data,
#endif
if (bitmap_is_set(table->write_set, field->field_index)) {
- mrn::DebugColumnAccess debug_column_access(table, table->read_set);
+ mrn::DebugColumnAccess debug_column_access(table, &table->read_set);
DBUG_PRINT("info", ("mroonga: update column %d(%d)",i,field->field_index));
if (field->is_null()) continue;
@@ -6821,7 +6818,7 @@ int ha_mroonga::storage_update_row(const uchar *old_data,
if (table->found_next_number_field &&
!table->s->next_number_keypart &&
new_data == table->record[0]) {
- mrn::DebugColumnAccess debug_column_access(table, table->read_set);
+ mrn::DebugColumnAccess debug_column_access(table, &table->read_set);
Field_num *field = (Field_num *) table->found_next_number_field;
if (field->unsigned_flag || field->val_int() > 0) {
MRN_LONG_TERM_SHARE *long_term_share = share->long_term_share;
@@ -6878,7 +6875,7 @@ int ha_mroonga::storage_update_row_index(const uchar *old_data,
my_ptrdiff_t ptr_diff = PTR_BYTE_DIFF(old_data, table->record[0]);
- mrn::DebugColumnAccess debug_column_access(table, table->read_set);
+ mrn::DebugColumnAccess debug_column_access(table, &table->read_set);
uint i;
uint n_keys = table->s->keys;
mrn_change_encoding(ctx, NULL);
@@ -7094,7 +7091,7 @@ int ha_mroonga::wrapper_delete_row_index(const uchar *buf)
DBUG_RETURN(0);
}
- mrn::DebugColumnAccess debug_column_access(table, table->read_set);
+ mrn::DebugColumnAccess debug_column_access(table, &table->read_set);
uint i;
uint n_keys = table->s->keys;
for (i = 0; i < n_keys; i++) {
@@ -7245,7 +7242,7 @@ int ha_mroonga::storage_delete_row_index(const uchar *buf)
GRN_TEXT_INIT(&key, 0);
GRN_TEXT_INIT(&encoded_key, 0);
- mrn::DebugColumnAccess debug_column_access(table, table->read_set);
+ mrn::DebugColumnAccess debug_column_access(table, &table->read_set);
uint i;
uint n_keys = table->s->keys;
mrn_change_encoding(ctx, NULL);
@@ -11436,7 +11433,7 @@ void ha_mroonga::storage_store_fields(uchar *buf, grn_id record_id)
}
}
- mrn::DebugColumnAccess debug_column_access(table, table->write_set);
+ mrn::DebugColumnAccess debug_column_access(table, &table->write_set);
DBUG_PRINT("info", ("mroonga: store column %d(%d)",i,field->field_index));
field->move_field_offset(ptr_diff);
if (strcmp(MRN_COLUMN_NAME_ID, column_name) == 0) {
@@ -11501,7 +11498,7 @@ void ha_mroonga::storage_store_fields_for_prep_update(const uchar *old_data,
)
#endif
) {
- mrn::DebugColumnAccess debug_column_access(table, table->write_set);
+ mrn::DebugColumnAccess debug_column_access(table, &table->write_set);
DBUG_PRINT("info", ("mroonga: store column %d(%d)",i,field->field_index));
grn_obj value;
GRN_OBJ_INIT(&value, GRN_BULK, 0, grn_obj_get_range(ctx, grn_columns[i]));
@@ -11537,7 +11534,7 @@ void ha_mroonga::storage_store_fields_by_index(uchar *buf)
if (KEY_N_KEY_PARTS(key_info) == 1) {
my_ptrdiff_t ptr_diff = PTR_BYTE_DIFF(buf, table->record[0]);
Field *field = key_info->key_part->field;
- mrn::DebugColumnAccess debug_column_access(table, table->write_set);
+ mrn::DebugColumnAccess debug_column_access(table, &table->write_set);
field->move_field_offset(ptr_diff);
storage_store_field(field, (const char *)key, key_length);
field->move_field_offset(-ptr_diff);
diff --git a/storage/mroonga/lib/mrn_debug_column_access.cpp b/storage/mroonga/lib/mrn_debug_column_access.cpp
index 778300a33d6..cb2ce7e35ca 100644
--- a/storage/mroonga/lib/mrn_debug_column_access.cpp
+++ b/storage/mroonga/lib/mrn_debug_column_access.cpp
@@ -20,7 +20,7 @@
#include "mrn_debug_column_access.hpp"
namespace mrn {
- DebugColumnAccess::DebugColumnAccess(TABLE *table, MY_BITMAP *bitmap)
+ DebugColumnAccess::DebugColumnAccess(TABLE *table, MY_BITMAP **bitmap)
: table_(table),
bitmap_(bitmap) {
#ifdef DBUG_ASSERT_EXISTS
diff --git a/storage/mroonga/lib/mrn_debug_column_access.hpp b/storage/mroonga/lib/mrn_debug_column_access.hpp
index 7c2fd60344e..954e04135f8 100644
--- a/storage/mroonga/lib/mrn_debug_column_access.hpp
+++ b/storage/mroonga/lib/mrn_debug_column_access.hpp
@@ -25,12 +25,12 @@
namespace mrn {
class DebugColumnAccess {
TABLE *table_;
- MY_BITMAP *bitmap_;
+ MY_BITMAP **bitmap_;
#ifdef DBUG_ASSERT_EXISTS
- my_bitmap_map *map_;
+ MY_BITMAP *map_;
#endif
public:
- DebugColumnAccess(TABLE *table, MY_BITMAP *bitmap);
+ DebugColumnAccess(TABLE *table, MY_BITMAP **bitmap);
~DebugColumnAccess();
};
}
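
The DebugColumnAccess change above, like the dbug_tmp_use_all_columns call sites in the OQGraph, perfschema and RocksDB hunks further down, switches from MY_BITMAP * to MY_BITMAP **: the guard now stores the address of the table's active column set so it can swap in a temporary set and restore the original afterwards. A hedged RAII sketch of the shape, with stand-in types rather than TABLE/MY_BITMAP:

struct ColumnSet {};                 // stand-in for MY_BITMAP

class AllColumnsGuard
{
  ColumnSet **slot_;                 // address of the table's active set
  ColumnSet *saved_;                 // previous value, restored on exit
public:
  AllColumnsGuard(ColumnSet **slot, ColumnSet *all_columns)
    : slot_(slot), saved_(*slot)
  { *slot_= all_columns; }

  ~AllColumnsGuard() { *slot_= saved_; }
};

Holding the address (rather than a copy of the pointer) is what lets the destructor put back whatever set was active at construction time.
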
diff --git a/storage/mroonga/lib/mrn_multiple_column_key_codec.cpp b/storage/mroonga/lib/mrn_multiple_column_key_codec.cpp
index 5536eecb255..73639685d0e 100644
--- a/storage/mroonga/lib/mrn_multiple_column_key_codec.cpp
+++ b/storage/mroonga/lib/mrn_multiple_column_key_codec.cpp
@@ -675,7 +675,8 @@ namespace mrn {
&normalized, &normalized_length, NULL);
uint16 new_blob_data_length;
if (normalized_length <= UINT_MAX16) {
- memcpy(grn_key, normalized, normalized_length);
+ if (normalized_length)
+ memcpy(grn_key, normalized, normalized_length);
if (normalized_length < *mysql_key_size) {
memset(grn_key + normalized_length,
'\0', *mysql_key_size - normalized_length);
diff --git a/storage/mroonga/vendor/groonga/lib/alloc.c b/storage/mroonga/vendor/groonga/lib/alloc.c
index 2e28431595a..5e556b83712 100644
--- a/storage/mroonga/vendor/groonga/lib/alloc.c
+++ b/storage/mroonga/vendor/groonga/lib/alloc.c
@@ -310,13 +310,13 @@ grn_alloc_info_free(grn_ctx *ctx)
}
#endif /* USE_MEMORY_DEBUG */
-#define GRN_CTX_SEGMENT_SIZE (1<<22)
+#define GRN_CTX_SEGMENT_SIZE (1U <<22)
#define GRN_CTX_SEGMENT_MASK (GRN_CTX_SEGMENT_SIZE - 1)
-#define GRN_CTX_SEGMENT_WORD (1<<31)
-#define GRN_CTX_SEGMENT_VLEN (1<<30)
-#define GRN_CTX_SEGMENT_LIFO (1<<29)
-#define GRN_CTX_SEGMENT_DIRTY (1<<28)
+#define GRN_CTX_SEGMENT_WORD (1U <<31)
+#define GRN_CTX_SEGMENT_VLEN (1U <<30)
+#define GRN_CTX_SEGMENT_LIFO (1U <<29)
+#define GRN_CTX_SEGMENT_DIRTY (1U <<28)
void
grn_alloc_init_ctx_impl(grn_ctx *ctx)
@@ -400,8 +400,8 @@ grn_ctx_alloc(grn_ctx *ctx, size_t size, int flags,
header[0] = i;
header[1] = (int32_t) size;
} else {
- i = ctx->impl->currseg;
- mi = &ctx->impl->segs[i];
+ if ((i = ctx->impl->currseg) >= 0)
+ mi = &ctx->impl->segs[i];
if (i < 0 || size + mi->nref > GRN_CTX_SEGMENT_SIZE) {
for (i = 0, mi = ctx->impl->segs;; i++, mi++) {
if (i >= GRN_CTX_N_SEGMENTS) {
diff --git a/storage/mroonga/vendor/groonga/lib/db.c b/storage/mroonga/vendor/groonga/lib/db.c
index f3769f9aa4c..418335aaf00 100644
--- a/storage/mroonga/vendor/groonga/lib/db.c
+++ b/storage/mroonga/vendor/groonga/lib/db.c
@@ -12494,7 +12494,7 @@ grn_db_init_builtin_types(grn_ctx *ctx)
GRN_OBJ_KEY_VAR_SIZE, 1 << 16);
if (!obj || DB_OBJ(obj)->id != GRN_DB_TEXT) { return GRN_FILE_CORRUPT; }
obj = deftype(ctx, "LongText",
- GRN_OBJ_KEY_VAR_SIZE, 1 << 31);
+ GRN_OBJ_KEY_VAR_SIZE, 1U << 31);
if (!obj || DB_OBJ(obj)->id != GRN_DB_LONG_TEXT) { return GRN_FILE_CORRUPT; }
obj = deftype(ctx, "TokyoGeoPoint",
GRN_OBJ_KEY_GEO_POINT, sizeof(grn_geo_point));
diff --git a/storage/mroonga/vendor/groonga/lib/pat.c b/storage/mroonga/vendor/groonga/lib/pat.c
index 642173e2fdc..01f6108fbd0 100644
--- a/storage/mroonga/vendor/groonga/lib/pat.c
+++ b/storage/mroonga/vendor/groonga/lib/pat.c
@@ -899,7 +899,7 @@ chop(grn_ctx *ctx, grn_pat *pat, const char **key, const char *end, uint32_t *lk
case GRN_OBJ_KEY_FLOAT :\
if ((size) == sizeof(int64_t)) {\
int64_t v = *(int64_t *)(key);\
- v ^= ((v >> 63)|(1LL << 63));\
+ v ^= ((v >> 63)|(1ULL << 63));\
grn_hton((keybuf), &v, (size));\
}\
break;\
@@ -924,7 +924,7 @@ chop(grn_ctx *ctx, grn_pat *pat, const char **key, const char *end, uint32_t *lk
if ((size) == sizeof(int64_t)) {\
int64_t v;\
grn_hton(&v, (key), (size));\
- *((int64_t *)(keybuf)) = v ^ (((v^(1LL<<63))>> 63)|(1LL<<63)); \
+ *((int64_t *)(keybuf)) = v ^ ((((int64_t)(v^(1ULL<<63)))>> 63)|(1ULL<<63)); \
}\
break;\
}\
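
The groonga constant and key-encoding hunks above replace signed shifts that set the top bit (1 << 31, 1LL << 63) with unsigned ones: shifting a 1 into the sign bit of a signed integer is undefined behaviour, while the unsigned forms are well defined. A small self-contained illustration:

#include <cstdint>
#include <cstdio>

int main()
{
  uint32_t hi32= 1U << 31;            // well defined: 0x80000000
  uint64_t hi64= 1ULL << 63;          // well defined: 0x8000000000000000
  // By contrast, 1 << 31 and 1LL << 63 overflow their signed types (UB).
  std::printf("%#x %#llx\n", (unsigned) hi32, (unsigned long long) hi64);
  return 0;
}
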
diff --git a/storage/mroonga/vendor/groonga/lib/proc/proc_select.c b/storage/mroonga/vendor/groonga/lib/proc/proc_select.c
index 605fd42239f..1f2a5005401 100644
--- a/storage/mroonga/vendor/groonga/lib/proc/proc_select.c
+++ b/storage/mroonga/vendor/groonga/lib/proc/proc_select.c
@@ -2989,7 +2989,8 @@ grn_select(grn_ctx *ctx, grn_select_data *data)
char *cp = cache_key;
#define PUT_CACHE_KEY(string) \
- grn_memcpy(cp, (string).value, (string).length); \
+ if ((string).value) \
+ grn_memcpy(cp, (string).value, (string).length); \
cp += (string).length; \
*cp++ = '\0'
diff --git a/storage/mroonga/vendor/groonga/lib/str.c b/storage/mroonga/vendor/groonga/lib/str.c
index 6b2d17769ca..4f0a3a98699 100644
--- a/storage/mroonga/vendor/groonga/lib/str.c
+++ b/storage/mroonga/vendor/groonga/lib/str.c
@@ -46,7 +46,7 @@ grn_str_charlen_utf8(grn_ctx *ctx, const unsigned char *str, const unsigned char
if (*str & 0x80) {
int i;
int len;
- GRN_BIT_SCAN_REV(~(*str << 24), len);
+ GRN_BIT_SCAN_REV(~(((uint) *str) << 24), len);
len = 31 - len;
if ((unsigned int)(len - 2) >= 3) { /* (len == 1 || len >= 5) */
GRN_LOG(ctx, GRN_LOG_WARNING,
@@ -1963,7 +1963,8 @@ grn_bulk_write(grn_ctx *ctx, grn_obj *buf, const char *str, unsigned int len)
if ((rc = grn_bulk_resize(ctx, buf, GRN_BULK_VSIZE(buf) + len))) { return rc; }
}
curr = GRN_BULK_CURR(buf);
- grn_memcpy(curr, str, len);
+ if (str)
+ grn_memcpy(curr, str, len);
GRN_BULK_INCR_LEN(buf, len);
return rc;
}
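
The proc_select.c and str.c hunks above (like the earlier mrn_multiple_column_key_codec.cpp one) skip the copy when the source pointer may be null: calling memcpy with a null pointer is undefined even when the length is zero. A minimal sketch of the guard:

#include <cstddef>
#include <cstring>

static void copy_if_nonnull(char *dst, const char *src, size_t len)
{
  if (src)                    // avoid memcpy(dst, NULL, 0), which is UB
    std::memcpy(dst, src, len);
}
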
diff --git a/storage/myisam/myisampack.c b/storage/myisam/myisampack.c
index ba6744ae815..8a0ca759871 100644
--- a/storage/myisam/myisampack.c
+++ b/storage/myisam/myisampack.c
@@ -1952,7 +1952,7 @@ static void make_traverse_code_tree(HUFF_TREE *huff_tree,
{
chr=element->a.leaf.element_nr;
huff_tree->code_len[chr]= (uchar) (8 * sizeof(ulonglong) - size);
- huff_tree->code[chr]= (code >> size);
+ huff_tree->code[chr]= (size == 8 * sizeof(ulonglong)) ? 0 : (code >> size);
if (huff_tree->height < 8 * sizeof(ulonglong) - size)
huff_tree->height= 8 * sizeof(ulonglong) - size;
}
@@ -2943,12 +2943,15 @@ static void flush_bits(void)
ulonglong bit_buffer;
bits= file_buffer.bits & ~7;
- bit_buffer= file_buffer.bitbucket >> bits;
- bits= BITS_SAVED - bits;
- while (bits > 0)
+ if (bits != BITS_SAVED)
{
- bits-= 8;
- *file_buffer.pos++= (uchar) (bit_buffer >> bits);
+ bit_buffer= file_buffer.bitbucket >> bits;
+ bits= BITS_SAVED - bits;
+ while (bits > 0)
+ {
+ bits-= 8;
+ *file_buffer.pos++= (uchar) (bit_buffer >> bits);
+ }
}
if (file_buffer.pos >= file_buffer.end)
(void) flush_buffer(~ (ulong) 0);
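
The myisampack.c hunks above guard two full-width cases: make_traverse_code_tree() no longer shifts a 64-bit code by 64 bits (undefined behaviour) but returns 0 explicitly, and flush_bits() skips the write loop entirely when nothing is buffered, which would otherwise shift the bit bucket by its full width. A tiny sketch of the shift guard:

#include <cstdint>

static uint64_t shift_right_checked(uint64_t code, unsigned size)
{
  // Shifting a 64-bit value by 64 is undefined; return 0 explicitly instead.
  return size == 64 ? 0 : (code >> size);
}
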
diff --git a/storage/oqgraph/ha_oqgraph.cc b/storage/oqgraph/ha_oqgraph.cc
index fd715c57a1f..e0e81f7cddc 100644
--- a/storage/oqgraph/ha_oqgraph.cc
+++ b/storage/oqgraph/ha_oqgraph.cc
@@ -908,7 +908,7 @@ int ha_oqgraph::index_read_idx(byte * buf, uint index, const byte * key,
bmove_align(buf, table->s->default_values, table->s->reclength);
key_restore(buf, (byte*) key, key_info, key_len);
- my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
+ MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->read_set);
my_ptrdiff_t ptrdiff= buf - table->record[0];
if (ptrdiff)
@@ -937,7 +937,7 @@ int ha_oqgraph::index_read_idx(byte * buf, uint index, const byte * key,
field[1]->move_field_offset(-ptrdiff);
field[2]->move_field_offset(-ptrdiff);
}
- dbug_tmp_restore_column_map(table->read_set, old_map);
+ dbug_tmp_restore_column_map(&table->read_set, old_map);
return error_code(oqgraph::NO_MORE_DATA);
}
}
@@ -962,7 +962,7 @@ int ha_oqgraph::index_read_idx(byte * buf, uint index, const byte * key,
field[1]->move_field_offset(-ptrdiff);
field[2]->move_field_offset(-ptrdiff);
}
- dbug_tmp_restore_column_map(table->read_set, old_map);
+ dbug_tmp_restore_column_map(&table->read_set, old_map);
// Keep the latch around so we can use it in the query result later -
// See fill_record().
@@ -995,7 +995,7 @@ int ha_oqgraph::fill_record(byte *record, const open_query::row &row)
bmove_align(record, table->s->default_values, table->s->reclength);
- my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set);
+ MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->write_set);
my_ptrdiff_t ptrdiff= record - table->record[0];
if (ptrdiff)
@@ -1071,7 +1071,7 @@ int ha_oqgraph::fill_record(byte *record, const open_query::row &row)
field[4]->move_field_offset(-ptrdiff);
field[5]->move_field_offset(-ptrdiff);
}
- dbug_tmp_restore_column_map(table->write_set, old_map);
+ dbug_tmp_restore_column_map(&table->write_set, old_map);
return 0;
}
diff --git a/storage/perfschema/pfs_engine_table.cc b/storage/perfschema/pfs_engine_table.cc
index 144ebcddff4..acab0e73a3d 100644
--- a/storage/perfschema/pfs_engine_table.cc
+++ b/storage/perfschema/pfs_engine_table.cc
@@ -188,17 +188,15 @@ ha_rows PFS_engine_table_share::get_row_count(void) const
int PFS_engine_table_share::write_row(TABLE *table, const unsigned char *buf,
Field **fields) const
{
- my_bitmap_map *org_bitmap;
-
if (m_write_row == NULL)
{
return HA_ERR_WRONG_COMMAND;
}
/* We internally read from Fields to support the write interface */
- org_bitmap= dbug_tmp_use_all_columns(table, table->read_set);
+ MY_BITMAP *org_bitmap= dbug_tmp_use_all_columns(table, &table->read_set);
int result= m_write_row(table, buf, fields);
- dbug_tmp_restore_column_map(table->read_set, org_bitmap);
+ dbug_tmp_restore_column_map(&table->read_set, org_bitmap);
return result;
}
@@ -256,7 +254,6 @@ int PFS_engine_table::read_row(TABLE *table,
unsigned char *buf,
Field **fields)
{
- my_bitmap_map *org_bitmap;
Field *f;
Field **fields_reset;
@@ -264,7 +261,7 @@ int PFS_engine_table::read_row(TABLE *table,
bool read_all= !bitmap_is_clear_all(table->write_set);
/* We internally write to Fields to support the read interface */
- org_bitmap= dbug_tmp_use_all_columns(table, table->write_set);
+ MY_BITMAP *org_bitmap= dbug_tmp_use_all_columns(table, &table->write_set);
/*
Some callers of the storage engine interface do not honor the
@@ -276,7 +273,7 @@ int PFS_engine_table::read_row(TABLE *table,
f->reset();
int result= read_row_values(table, buf, fields, read_all);
- dbug_tmp_restore_column_map(table->write_set, org_bitmap);
+ dbug_tmp_restore_column_map(&table->write_set, org_bitmap);
return result;
}
@@ -294,12 +291,10 @@ int PFS_engine_table::update_row(TABLE *table,
const unsigned char *new_buf,
Field **fields)
{
- my_bitmap_map *org_bitmap;
-
/* We internally read from Fields to support the write interface */
- org_bitmap= dbug_tmp_use_all_columns(table, table->read_set);
+ MY_BITMAP *org_bitmap= dbug_tmp_use_all_columns(table, &table->read_set);
int result= update_row_values(table, old_buf, new_buf, fields);
- dbug_tmp_restore_column_map(table->read_set, org_bitmap);
+ dbug_tmp_restore_column_map(&table->read_set, org_bitmap);
return result;
}
@@ -308,12 +303,10 @@ int PFS_engine_table::delete_row(TABLE *table,
const unsigned char *buf,
Field **fields)
{
- my_bitmap_map *org_bitmap;
-
/* We internally read from Fields to support the delete interface */
- org_bitmap= dbug_tmp_use_all_columns(table, table->read_set);
+ MY_BITMAP *org_bitmap= dbug_tmp_use_all_columns(table, &table->read_set);
int result= delete_row_values(table, buf, fields);
- dbug_tmp_restore_column_map(table->read_set, org_bitmap);
+ dbug_tmp_restore_column_map(&table->read_set, org_bitmap);
return result;
}
@@ -1240,11 +1233,11 @@ bool pfs_show_status(handlerton *hton, THD *thd,
break;
case 141:
name= "(filename_hash).count";
- size= filename_hash.count;
+ size= pfs_filename_hash.count;
break;
case 142:
name= "(filename_hash).size";
- size= filename_hash.size;
+ size= pfs_filename_hash.size;
break;
case 143:
name= "(host_hash).count";
diff --git a/storage/perfschema/pfs_instr.cc b/storage/perfschema/pfs_instr.cc
index ca9e0385021..fd8da77fe40 100644
--- a/storage/perfschema/pfs_instr.cc
+++ b/storage/perfschema/pfs_instr.cc
@@ -143,7 +143,7 @@ PFS_thread *thread_array= NULL;
File instrumentation instances array.
@sa file_max
@sa file_lost
- @sa filename_hash
+ @sa pfs_filename_hash
*/
PFS_file *file_array= NULL;
@@ -189,8 +189,8 @@ static unsigned char *history_stmts_digest_token_array= NULL;
static char *thread_session_connect_attrs_array= NULL;
/** Hash table for instrumented files. */
-LF_HASH filename_hash;
-/** True if filename_hash is initialized. */
+LF_HASH pfs_filename_hash;
+/** True if pfs_filename_hash is initialized. */
static bool filename_hash_inited= false;
/**
@@ -586,7 +586,7 @@ int init_file_hash(void)
{
if ((! filename_hash_inited) && (file_max > 0))
{
- lf_hash_init(&filename_hash, sizeof(PFS_file*), LF_HASH_UNIQUE,
+ lf_hash_init(&pfs_filename_hash, sizeof(PFS_file*), LF_HASH_UNIQUE,
0, 0, filename_hash_get_key, &my_charset_bin);
/* filename_hash.size= file_max; */
filename_hash_inited= true;
@@ -599,7 +599,7 @@ void cleanup_file_hash(void)
{
if (filename_hash_inited)
{
- lf_hash_destroy(&filename_hash);
+ lf_hash_destroy(&pfs_filename_hash);
filename_hash_inited= false;
}
}
@@ -1186,7 +1186,7 @@ void destroy_thread(PFS_thread *pfs)
}
/**
- Get the hash pins for @filename_hash.
+ Get the hash pins for @pfs_filename_hash.
@param thread The running thread.
@returns The LF_HASH pins for the thread.
*/
@@ -1196,7 +1196,7 @@ LF_PINS* get_filename_hash_pins(PFS_thread *thread)
{
if (! filename_hash_inited)
return NULL;
- thread->m_filename_hash_pins= lf_hash_get_pins(&filename_hash);
+ thread->m_filename_hash_pins= lf_hash_get_pins(&pfs_filename_hash);
}
return thread->m_filename_hash_pins;
}
@@ -1314,7 +1314,7 @@ find_or_create_file(PFS_thread *thread, PFS_file_class *klass,
search:
entry= reinterpret_cast<PFS_file**>
- (lf_hash_search(&filename_hash, pins,
+ (lf_hash_search(&pfs_filename_hash, pins,
normalized_filename, normalized_length));
if (entry && (entry != MY_ERRPTR))
{
@@ -1359,7 +1359,7 @@ search:
pfs->m_identity= (const void *)pfs;
int res;
- res= lf_hash_insert(&filename_hash, thread->m_filename_hash_pins,
+ res= lf_hash_insert(&pfs_filename_hash, thread->m_filename_hash_pins,
&pfs);
if (likely(res == 0))
{
@@ -1426,7 +1426,7 @@ void destroy_file(PFS_thread *thread, PFS_file *pfs)
LF_PINS *pins= get_filename_hash_pins(thread);
DBUG_ASSERT(pins != NULL);
- lf_hash_delete(&filename_hash, pins,
+ lf_hash_delete(&pfs_filename_hash, pins,
pfs->m_filename, pfs->m_filename_length);
if (klass->is_singleton())
klass->m_singleton= NULL;
diff --git a/storage/perfschema/pfs_instr.h b/storage/perfschema/pfs_instr.h
index 81bc52d1d75..a5ff3b4a17d 100644
--- a/storage/perfschema/pfs_instr.h
+++ b/storage/perfschema/pfs_instr.h
@@ -698,7 +698,7 @@ void update_socket_derived_flags();
/** Update derived flags for all instruments. */
void update_instruments_derived_flags();
-extern LF_HASH filename_hash;
+extern LF_HASH pfs_filename_hash;
/** @} */
#endif
diff --git a/storage/perfschema/table_accounts.cc b/storage/perfschema/table_accounts.cc
index 708f8269a69..550f6614abb 100644
--- a/storage/perfschema/table_accounts.cc
+++ b/storage/perfschema/table_accounts.cc
@@ -43,8 +43,8 @@ table_accounts::m_share=
sizeof(PFS_simple_index), /* ref length */
&m_table_lock,
{ C_STRING_WITH_LEN("CREATE TABLE accounts("
- "USER CHAR(16) collate utf8_bin default null,"
- "HOST CHAR(60) collate utf8_bin default null,"
+ "USER CHAR(" STRINGIFY_ARG(USERNAME_CHAR_LENGTH) ") collate utf8_bin default null,"
+ "HOST CHAR(" STRINGIFY_ARG(HOSTNAME_LENGTH) ") collate utf8_bin default null,"
"CURRENT_CONNECTIONS bigint not null,"
"TOTAL_CONNECTIONS bigint not null)") }
};
diff --git a/storage/perfschema/table_esgs_by_account_by_event_name.cc b/storage/perfschema/table_esgs_by_account_by_event_name.cc
index 22e4e0040f1..9a983eb076e 100644
--- a/storage/perfschema/table_esgs_by_account_by_event_name.cc
+++ b/storage/perfschema/table_esgs_by_account_by_event_name.cc
@@ -49,8 +49,8 @@ table_esgs_by_account_by_event_name::m_share=
sizeof(pos_esgs_by_account_by_event_name),
&m_table_lock,
{ C_STRING_WITH_LEN("CREATE TABLE events_stages_summary_by_account_by_event_name("
- "USER CHAR(16) collate utf8_bin default null,"
- "HOST CHAR(60) collate utf8_bin default null,"
+ "USER CHAR(" STRINGIFY_ARG(USERNAME_CHAR_LENGTH) ") collate utf8_bin default null,"
+ "HOST CHAR(" STRINGIFY_ARG(HOSTNAME_LENGTH) ") collate utf8_bin default null,"
"EVENT_NAME VARCHAR(128) not null,"
"COUNT_STAR BIGINT unsigned not null,"
"SUM_TIMER_WAIT BIGINT unsigned not null,"
diff --git a/storage/perfschema/table_esgs_by_host_by_event_name.cc b/storage/perfschema/table_esgs_by_host_by_event_name.cc
index 86cc2eb1b86..5ff9faf0c1e 100644
--- a/storage/perfschema/table_esgs_by_host_by_event_name.cc
+++ b/storage/perfschema/table_esgs_by_host_by_event_name.cc
@@ -50,7 +50,7 @@ table_esgs_by_host_by_event_name::m_share=
sizeof(pos_esgs_by_host_by_event_name),
&m_table_lock,
{ C_STRING_WITH_LEN("CREATE TABLE events_stages_summary_by_host_by_event_name("
- "HOST CHAR(60) collate utf8_bin default null,"
+ "HOST CHAR(" STRINGIFY_ARG(HOSTNAME_LENGTH) ") collate utf8_bin default null,"
"EVENT_NAME VARCHAR(128) not null,"
"COUNT_STAR BIGINT unsigned not null,"
"SUM_TIMER_WAIT BIGINT unsigned not null,"
diff --git a/storage/perfschema/table_esgs_by_user_by_event_name.cc b/storage/perfschema/table_esgs_by_user_by_event_name.cc
index af73c1fc5fd..23b7b0f6689 100644
--- a/storage/perfschema/table_esgs_by_user_by_event_name.cc
+++ b/storage/perfschema/table_esgs_by_user_by_event_name.cc
@@ -50,7 +50,7 @@ table_esgs_by_user_by_event_name::m_share=
sizeof(pos_esgs_by_user_by_event_name),
&m_table_lock,
{ C_STRING_WITH_LEN("CREATE TABLE events_stages_summary_by_user_by_event_name("
- "USER CHAR(16) collate utf8_bin default null,"
+ "USER CHAR(" STRINGIFY_ARG(USERNAME_CHAR_LENGTH) ") collate utf8_bin default null,"
"EVENT_NAME VARCHAR(128) not null,"
"COUNT_STAR BIGINT unsigned not null,"
"SUM_TIMER_WAIT BIGINT unsigned not null,"
diff --git a/storage/perfschema/table_esms_by_account_by_event_name.cc b/storage/perfschema/table_esms_by_account_by_event_name.cc
index 7afdabcbbfe..312050aa9c9 100644
--- a/storage/perfschema/table_esms_by_account_by_event_name.cc
+++ b/storage/perfschema/table_esms_by_account_by_event_name.cc
@@ -49,8 +49,8 @@ table_esms_by_account_by_event_name::m_share=
sizeof(pos_esms_by_account_by_event_name),
&m_table_lock,
{ C_STRING_WITH_LEN("CREATE TABLE events_statements_summary_by_account_by_event_name("
- "USER CHAR(16) collate utf8_bin default null,"
- "HOST CHAR(60) collate utf8_bin default null,"
+ "USER CHAR(" STRINGIFY_ARG(USERNAME_CHAR_LENGTH) ") collate utf8_bin default null,"
+ "HOST CHAR(" STRINGIFY_ARG(HOSTNAME_LENGTH) ") collate utf8_bin default null,"
"EVENT_NAME VARCHAR(128) not null,"
"COUNT_STAR BIGINT unsigned not null,"
"SUM_TIMER_WAIT BIGINT unsigned not null,"
diff --git a/storage/perfschema/table_esms_by_host_by_event_name.cc b/storage/perfschema/table_esms_by_host_by_event_name.cc
index 42629ab6c09..b390d1e17a4 100644
--- a/storage/perfschema/table_esms_by_host_by_event_name.cc
+++ b/storage/perfschema/table_esms_by_host_by_event_name.cc
@@ -50,7 +50,7 @@ table_esms_by_host_by_event_name::m_share=
sizeof(pos_esms_by_host_by_event_name),
&m_table_lock,
{ C_STRING_WITH_LEN("CREATE TABLE events_statements_summary_by_host_by_event_name("
- "HOST CHAR(60) collate utf8_bin default null,"
+ "HOST CHAR(" STRINGIFY_ARG(HOSTNAME_LENGTH) ") collate utf8_bin default null,"
"EVENT_NAME VARCHAR(128) not null,"
"COUNT_STAR BIGINT unsigned not null,"
"SUM_TIMER_WAIT BIGINT unsigned not null,"
diff --git a/storage/perfschema/table_esms_by_user_by_event_name.cc b/storage/perfschema/table_esms_by_user_by_event_name.cc
index f8708ac9a14..1fa1289aa8c 100644
--- a/storage/perfschema/table_esms_by_user_by_event_name.cc
+++ b/storage/perfschema/table_esms_by_user_by_event_name.cc
@@ -50,7 +50,7 @@ table_esms_by_user_by_event_name::m_share=
sizeof(pos_esms_by_user_by_event_name),
&m_table_lock,
{ C_STRING_WITH_LEN("CREATE TABLE events_statements_summary_by_user_by_event_name("
- "USER CHAR(16) collate utf8_bin default null,"
+ "USER CHAR(" STRINGIFY_ARG(USERNAME_CHAR_LENGTH) ") collate utf8_bin default null,"
"EVENT_NAME VARCHAR(128) not null,"
"COUNT_STAR BIGINT unsigned not null,"
"SUM_TIMER_WAIT BIGINT unsigned not null,"
diff --git a/storage/perfschema/table_ews_by_account_by_event_name.cc b/storage/perfschema/table_ews_by_account_by_event_name.cc
index fa6258ec9ac..40e0152f889 100644
--- a/storage/perfschema/table_ews_by_account_by_event_name.cc
+++ b/storage/perfschema/table_ews_by_account_by_event_name.cc
@@ -49,8 +49,8 @@ table_ews_by_account_by_event_name::m_share=
sizeof(pos_ews_by_account_by_event_name),
&m_table_lock,
{ C_STRING_WITH_LEN("CREATE TABLE events_waits_summary_by_account_by_event_name("
- "USER CHAR(16) collate utf8_bin default null,"
- "HOST CHAR(60) collate utf8_bin default null,"
+ "USER CHAR(" STRINGIFY_ARG(USERNAME_CHAR_LENGTH) ") collate utf8_bin default null,"
+ "HOST CHAR(" STRINGIFY_ARG(HOSTNAME_LENGTH) ") collate utf8_bin default null,"
"EVENT_NAME VARCHAR(128) not null,"
"COUNT_STAR BIGINT unsigned not null,"
"SUM_TIMER_WAIT BIGINT unsigned not null,"
diff --git a/storage/perfschema/table_ews_by_host_by_event_name.cc b/storage/perfschema/table_ews_by_host_by_event_name.cc
index e3ef7ca3720..d22d6fc8d79 100644
--- a/storage/perfschema/table_ews_by_host_by_event_name.cc
+++ b/storage/perfschema/table_ews_by_host_by_event_name.cc
@@ -50,7 +50,7 @@ table_ews_by_host_by_event_name::m_share=
sizeof(pos_ews_by_host_by_event_name),
&m_table_lock,
{ C_STRING_WITH_LEN("CREATE TABLE events_waits_summary_by_host_by_event_name("
- "HOST CHAR(60) collate utf8_bin default null,"
+ "HOST CHAR(" STRINGIFY_ARG(HOSTNAME_LENGTH) ") collate utf8_bin default null,"
"EVENT_NAME VARCHAR(128) not null,"
"COUNT_STAR BIGINT unsigned not null,"
"SUM_TIMER_WAIT BIGINT unsigned not null,"
diff --git a/storage/perfschema/table_ews_by_user_by_event_name.cc b/storage/perfschema/table_ews_by_user_by_event_name.cc
index cb99f749a9c..b2f8e1da824 100644
--- a/storage/perfschema/table_ews_by_user_by_event_name.cc
+++ b/storage/perfschema/table_ews_by_user_by_event_name.cc
@@ -50,7 +50,7 @@ table_ews_by_user_by_event_name::m_share=
sizeof(pos_ews_by_user_by_event_name),
&m_table_lock,
{ C_STRING_WITH_LEN("CREATE TABLE events_waits_summary_by_user_by_event_name("
- "USER CHAR(16) collate utf8_bin default null,"
+ "USER CHAR(" STRINGIFY_ARG(USERNAME_CHAR_LENGTH) ") collate utf8_bin default null,"
"EVENT_NAME VARCHAR(128) not null,"
"COUNT_STAR BIGINT unsigned not null,"
"SUM_TIMER_WAIT BIGINT unsigned not null,"
diff --git a/storage/perfschema/table_hosts.cc b/storage/perfschema/table_hosts.cc
index 8bc5310817c..221e0664590 100644
--- a/storage/perfschema/table_hosts.cc
+++ b/storage/perfschema/table_hosts.cc
@@ -44,7 +44,7 @@ table_hosts::m_share=
sizeof(PFS_simple_index), /* ref length */
&m_table_lock,
{ C_STRING_WITH_LEN("CREATE TABLE hosts("
- "HOST CHAR(60) collate utf8_bin default null,"
+ "HOST CHAR(" STRINGIFY_ARG(HOSTNAME_LENGTH) ") collate utf8_bin default null,"
"CURRENT_CONNECTIONS bigint not null,"
"TOTAL_CONNECTIONS bigint not null)") }
};
diff --git a/storage/perfschema/table_setup_actors.cc b/storage/perfschema/table_setup_actors.cc
index b05f6ad004b..f18d6ceee20 100644
--- a/storage/perfschema/table_setup_actors.cc
+++ b/storage/perfschema/table_setup_actors.cc
@@ -49,9 +49,9 @@ table_setup_actors::m_share=
sizeof(PFS_simple_index),
&m_table_lock,
{ C_STRING_WITH_LEN("CREATE TABLE setup_actors("
- "HOST CHAR(60) collate utf8_bin default '%' not null,"
- "USER CHAR(16) collate utf8_bin default '%' not null,"
- "ROLE CHAR(16) collate utf8_bin default '%' not null)") }
+ "HOST CHAR(" STRINGIFY_ARG(HOSTNAME_LENGTH) ") collate utf8_bin default '%' not null,"
+ "USER CHAR(" STRINGIFY_ARG(USERNAME_CHAR_LENGTH) ") collate utf8_bin default '%' not null,"
+ "ROLE CHAR(" STRINGIFY_ARG(USERNAME_CHAR_LENGTH) ") collate utf8_bin default '%' not null)") }
};
PFS_engine_table* table_setup_actors::create()
diff --git a/storage/perfschema/table_threads.cc b/storage/perfschema/table_threads.cc
index b396db1a814..59b0af453fb 100644
--- a/storage/perfschema/table_threads.cc
+++ b/storage/perfschema/table_threads.cc
@@ -46,8 +46,8 @@ table_threads::m_share=
"NAME VARCHAR(128) not null,"
"TYPE VARCHAR(10) not null,"
"PROCESSLIST_ID BIGINT unsigned,"
- "PROCESSLIST_USER VARCHAR(16),"
- "PROCESSLIST_HOST VARCHAR(60),"
+ "PROCESSLIST_USER VARCHAR(" STRINGIFY_ARG(USERNAME_CHAR_LENGTH) "),"
+ "PROCESSLIST_HOST VARCHAR(" STRINGIFY_ARG(HOSTNAME_LENGTH) "),"
"PROCESSLIST_DB VARCHAR(64),"
"PROCESSLIST_COMMAND VARCHAR(16),"
"PROCESSLIST_TIME BIGINT,"
diff --git a/storage/perfschema/table_users.cc b/storage/perfschema/table_users.cc
index 883ebd36633..e9592c55f55 100644
--- a/storage/perfschema/table_users.cc
+++ b/storage/perfschema/table_users.cc
@@ -44,7 +44,7 @@ table_users::m_share=
sizeof(PFS_simple_index), /* ref length */
&m_table_lock,
{ C_STRING_WITH_LEN("CREATE TABLE users("
- "USER CHAR(16) collate utf8_bin default null,"
+ "USER CHAR(" STRINGIFY_ARG(USERNAME_CHAR_LENGTH) ") collate utf8_bin default null,"
"CURRENT_CONNECTIONS bigint not null,"
"TOTAL_CONNECTIONS bigint not null)") }
};
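
Reviewer note: the perfschema DDL strings above now splice the HOST/USER column widths from the shared HOSTNAME_LENGTH and USERNAME_CHAR_LENGTH defines instead of hard-coding 60 and 16. As a reading aid, here is a minimal, self-contained C++ sketch of the two-step preprocessor stringification idiom a macro like STRINGIFY_ARG is assumed to follow (the helper names and the demo width are hypothetical, not the server's definitions):

    #include <cstdio>

    /* Two-step expansion: the outer macro expands HOSTLEN_DEMO to its value
       before the inner '#' turns it into a string literal. */
    #define STR_HELPER(x) #x
    #define STR(x) STR_HELPER(x)

    #define HOSTLEN_DEMO 255   /* hypothetical width, stands in for HOSTNAME_LENGTH */

    int main()
    {
      /* Adjacent string literals concatenate at compile time, so the DDL
         text ends up as one string: "HOST CHAR(255) collate utf8_bin ..." */
      const char *ddl = "HOST CHAR(" STR(HOSTLEN_DEMO) ") collate utf8_bin default null";
      puts(ddl);
      return 0;
    }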
diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc
index 52c93c37806..b4fcb097b71 100644
--- a/storage/rocksdb/ha_rocksdb.cc
+++ b/storage/rocksdb/ha_rocksdb.cc
@@ -6116,8 +6116,7 @@ ulonglong ha_rocksdb::load_auto_incr_value_from_index() {
Field *field =
table->key_info[table->s->next_number_index].key_part[0].field;
ulonglong max_val = rdb_get_int_col_max_value(field);
- my_bitmap_map *const old_map =
- dbug_tmp_use_all_columns(table, table->read_set);
+ MY_BITMAP *const old_map = dbug_tmp_use_all_columns(table, &table->read_set);
last_val = field->val_int();
if (last_val != max_val) {
last_val++;
@@ -6132,7 +6131,7 @@ ulonglong ha_rocksdb::load_auto_incr_value_from_index() {
}
}
#endif
- dbug_tmp_restore_column_map(table->read_set, old_map);
+ dbug_tmp_restore_column_map(&table->read_set, old_map);
}
m_keyread_only = save_keyread_only;
@@ -6169,15 +6168,15 @@ void ha_rocksdb::update_auto_incr_val_from_field() {
field = table->key_info[table->s->next_number_index].key_part[0].field;
max_val = rdb_get_int_col_max_value(field);
- my_bitmap_map *const old_map =
- dbug_tmp_use_all_columns(table, table->read_set);
+ MY_BITMAP *const old_map =
+ dbug_tmp_use_all_columns(table, &table->read_set);
new_val = field->val_int();
// don't increment if we would wrap around
if (new_val != max_val) {
new_val++;
}
- dbug_tmp_restore_column_map(table->read_set, old_map);
+ dbug_tmp_restore_column_map(&table->read_set, old_map);
// Only update if positive value was set for auto_incr column.
if (new_val <= max_val) {
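
Reviewer note: the RocksDB hunks above (and the matching sphinx, spider and tokudb hunks later in this patch) are mechanical updates for a changed debug helper: callers now pass the address of the bitmap pointer and keep the previous MY_BITMAP* instead of a my_bitmap_map*. A hedged sketch of the save/restore shape, with stand-in types so it compiles on its own (the real MY_BITMAP, TABLE and dbug_tmp_* helpers come from the server headers):

    /* Stand-ins only; this just mirrors the calling pattern in the patch. */
    struct MY_BITMAP_DEMO { unsigned long long bits; };

    static MY_BITMAP_DEMO all_columns_demo = { ~0ULL };

    /* Swap in an "all columns usable" map and return the previous one. */
    static MY_BITMAP_DEMO *demo_use_all_columns(MY_BITMAP_DEMO **bitmap)
    {
      MY_BITMAP_DEMO *old = *bitmap;
      *bitmap = &all_columns_demo;
      return old;
    }

    static void demo_restore_column_map(MY_BITMAP_DEMO **bitmap, MY_BITMAP_DEMO *old)
    {
      *bitmap = old;
    }

    struct TABLE_DEMO { MY_BITMAP_DEMO *read_set; };

    static void read_hidden_column(TABLE_DEMO *table)
    {
      MY_BITMAP_DEMO *old_map = demo_use_all_columns(&table->read_set);
      /* ... field->val_int() and friends would run here ... */
      demo_restore_column_map(&table->read_set, old_map);
    }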
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/issue896.result b/storage/rocksdb/mysql-test/rocksdb/r/issue896.result
index 917c95733f7..6b742ebaf0c 100644
--- a/storage/rocksdb/mysql-test/rocksdb/r/issue896.result
+++ b/storage/rocksdb/mysql-test/rocksdb/r/issue896.result
@@ -9,7 +9,7 @@ KEY `d` (`d`)
INSERT INTO t1 VALUES (100, 'aaabbb', UNIX_TIMESTAMP(), 200);
EXPLAIN SELECT COUNT(*) FROM t1 FORCE INDEX(d);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 index NULL d 11 NULL # Using index
+1 SIMPLE t1 index NULL d 9 NULL # Using index
# segfault here without the fix
SELECT COUNT(*) FROM t1 FORCE INDEX(d);
COUNT(*)
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result
index f2f9adebf46..daca7f7f78d 100644
--- a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result
+++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result
@@ -546,7 +546,7 @@ pk key1 col1
explain
select key1 from t30;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t30 index NULL key1 20 NULL # Using index
+1 SIMPLE t30 index NULL key1 18 NULL # Using index
select key1 from t30;
key1
row1-key
@@ -618,7 +618,7 @@ row3 row3-key row3-data
explain
select * from t30 order by key1 limit 3;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t30 index NULL key1 20 NULL #
+1 SIMPLE t30 index NULL key1 18 NULL #
select * from t30 order by key1 limit 3;
pk key1 col1
row1 row1-key row1-data
@@ -627,7 +627,7 @@ row3 row3-key row3-data
explain
select * from t30 order by key1 desc limit 3;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t30 index NULL key1 20 NULL #
+1 SIMPLE t30 index NULL key1 18 NULL #
select * from t30 order by key1 desc limit 3;
pk key1 col1
row5 row5-key row5-data
diff --git a/storage/rocksdb/rdb_datadic.cc b/storage/rocksdb/rdb_datadic.cc
index f0bc8a49761..99f1178d897 100644
--- a/storage/rocksdb/rdb_datadic.cc
+++ b/storage/rocksdb/rdb_datadic.cc
@@ -1489,12 +1489,12 @@ void Rdb_key_def::pack_with_make_sort_key(
DBUG_ASSERT(*dst != nullptr);
const int max_len = fpi->m_max_image_len;
- my_bitmap_map *old_map;
+ MY_BITMAP *old_map;
old_map= dbug_tmp_use_all_columns(field->table,
- field->table->read_set);
+ &field->table->read_set);
field->sort_string(*dst, max_len);
- dbug_tmp_restore_column_map(field->table->read_set, old_map);
+ dbug_tmp_restore_column_map(&field->table->read_set, old_map);
*dst += max_len;
}
diff --git a/storage/sequence/sequence.cc b/storage/sequence/sequence.cc
index b9f5d02bd51..8eae98955c3 100644
--- a/storage/sequence/sequence.cc
+++ b/storage/sequence/sequence.cc
@@ -115,13 +115,13 @@ THR_LOCK_DATA **ha_seq::store_lock(THD *thd, THR_LOCK_DATA **to,
void ha_seq::set(unsigned char *buf)
{
- my_bitmap_map *old_map = dbug_tmp_use_all_columns(table, table->write_set);
+ MY_BITMAP *old_map = dbug_tmp_use_all_columns(table, &table->write_set);
my_ptrdiff_t offset = (my_ptrdiff_t) (buf - table->record[0]);
Field *field = table->field[0];
field->move_field_offset(offset);
field->store(cur, true);
field->move_field_offset(-offset);
- dbug_tmp_restore_column_map(table->write_set, old_map);
+ dbug_tmp_restore_column_map(&table->write_set, old_map);
}
int ha_seq::rnd_init(bool scan)
diff --git a/storage/sphinx/ha_sphinx.cc b/storage/sphinx/ha_sphinx.cc
index 75558d333e0..f2bc24c47d4 100644
--- a/storage/sphinx/ha_sphinx.cc
+++ b/storage/sphinx/ha_sphinx.cc
@@ -3052,7 +3052,7 @@ int ha_sphinx::get_rec ( byte * buf, const byte *, uint )
}
#if MYSQL_VERSION_ID>50100
- my_bitmap_map * org_bitmap = dbug_tmp_use_all_columns ( table, table->write_set );
+ MY_BITMAP * org_bitmap = dbug_tmp_use_all_columns ( table, &table->write_set );
#endif
Field ** field = table->field;
@@ -3198,7 +3198,7 @@ int ha_sphinx::get_rec ( byte * buf, const byte *, uint )
m_iCurrentPos++;
#if MYSQL_VERSION_ID > 50100
- dbug_tmp_restore_column_map ( table->write_set, org_bitmap );
+ dbug_tmp_restore_column_map ( &table->write_set, org_bitmap );
#endif
SPH_RET(0);
diff --git a/storage/spider/ha_spider.cc b/storage/spider/ha_spider.cc
index b2600859b88..988c2305b04 100644
--- a/storage/spider/ha_spider.cc
+++ b/storage/spider/ha_spider.cc
@@ -10112,12 +10112,12 @@ int ha_spider::write_row(
if (!table->auto_increment_field_not_null)
{
#ifndef DBUG_OFF
- my_bitmap_map *tmp_map =
- dbug_tmp_use_all_columns(table, table->write_set);
+ MY_BITMAP *tmp_map =
+ dbug_tmp_use_all_columns(table, &table->write_set);
#endif
table->next_number_field->store((longlong) 0, TRUE);
#ifndef DBUG_OFF
- dbug_tmp_restore_column_map(table->write_set, tmp_map);
+ dbug_tmp_restore_column_map(&table->write_set, tmp_map);
#endif
force_auto_increment = FALSE;
table->file->insert_id_for_cur_row = 0;
@@ -10125,13 +10125,13 @@ int ha_spider::write_row(
} else if (auto_increment_mode == 2)
{
#ifndef DBUG_OFF
- my_bitmap_map *tmp_map =
- dbug_tmp_use_all_columns(table, table->write_set);
+ MY_BITMAP *tmp_map =
+ dbug_tmp_use_all_columns(table, &table->write_set);
#endif
table->next_number_field->store((longlong) 0, TRUE);
table->auto_increment_field_not_null = FALSE;
#ifndef DBUG_OFF
- dbug_tmp_restore_column_map(table->write_set, tmp_map);
+ dbug_tmp_restore_column_map(&table->write_set, tmp_map);
#endif
force_auto_increment = FALSE;
table->file->insert_id_for_cur_row = 0;
diff --git a/storage/spider/mysql-test/spider/r/basic_sql.result b/storage/spider/mysql-test/spider/r/basic_sql.result
index 94a09fc317b..ba904b5f577 100644
--- a/storage/spider/mysql-test/spider/r/basic_sql.result
+++ b/storage/spider/mysql-test/spider/r/basic_sql.result
@@ -717,6 +717,10 @@ TRUNCATE TABLE ta_l;
connection master_1;
SELECT a, b, date_format(c, '%Y-%m-%d %H:%i:%s') FROM ta_l ORDER BY a;
a b date_format(c, '%Y-%m-%d %H:%i:%s')
+connection master_1;
+create table t2345678911234567892123456789312345678941234567895123234234(id int) ENGINE=SPIDER
+COMMENT='host "192.168.21.1", user "spider", password "password", database "test32738123123123"';
+drop table t2345678911234567892123456789312345678941234567895123234234;
deinit
connection master_1;
diff --git a/storage/spider/mysql-test/spider/t/basic_sql.test b/storage/spider/mysql-test/spider/t/basic_sql.test
index 5bb040047fc..a3184a14beb 100644
--- a/storage/spider/mysql-test/spider/t/basic_sql.test
+++ b/storage/spider/mysql-test/spider/t/basic_sql.test
@@ -2677,6 +2677,11 @@ if ($USE_CHILD_GROUP2)
--connection master_1
SELECT a, b, date_format(c, '%Y-%m-%d %H:%i:%s') FROM ta_l ORDER BY a;
+--connection master_1
+create table t2345678911234567892123456789312345678941234567895123234234(id int) ENGINE=SPIDER
+ COMMENT='host "192.168.21.1", user "spider", password "password", database "test32738123123123"';
+drop table t2345678911234567892123456789312345678941234567895123234234;
+
--echo
--echo deinit
--disable_warnings
@@ -2689,6 +2694,7 @@ if ($USE_CHILD_GROUP2)
--connection child2_2
DROP DATABASE IF EXISTS auto_test_remote2;
}
+
--disable_query_log
--disable_result_log
--source test_deinit.inc
diff --git a/storage/spider/spd_db_conn.cc b/storage/spider/spd_db_conn.cc
index 6d2afc1fd55..264f85d74cb 100644
--- a/storage/spider/spd_db_conn.cc
+++ b/storage/spider/spd_db_conn.cc
@@ -1733,7 +1733,7 @@ int spider_db_append_key_where_internal(
DBUG_PRINT("info", ("spider end_key_part_map=%lu", end_key_part_map));
#ifndef DBUG_OFF
- my_bitmap_map *tmp_map = dbug_tmp_use_all_columns(table, table->read_set);
+ MY_BITMAP *tmp_map = dbug_tmp_use_all_columns(table, &table->read_set);
#endif
if (sql_kind == SPIDER_SQL_KIND_HANDLER)
@@ -2663,7 +2663,7 @@ end:
if (sql_kind == SPIDER_SQL_KIND_SQL)
dbton_hdl->set_order_pos(sql_type);
#ifndef DBUG_OFF
- dbug_tmp_restore_column_map(table->read_set, tmp_map);
+ dbug_tmp_restore_column_map(&table->read_set, tmp_map);
#endif
DBUG_RETURN(0);
}
@@ -3200,8 +3200,8 @@ int spider_db_fetch_table(
bitmap_is_set(table->write_set, (*field)->field_index)
)) {
#ifndef DBUG_OFF
- my_bitmap_map *tmp_map =
- dbug_tmp_use_all_columns(table, table->write_set);
+ MY_BITMAP *tmp_map =
+ dbug_tmp_use_all_columns(table, &table->write_set);
#endif
DBUG_PRINT("info", ("spider bitmap is set %s",
SPIDER_field_name_str(*field)));
@@ -3209,7 +3209,7 @@ int spider_db_fetch_table(
spider_db_fetch_row(share, *field, row, ptr_diff)))
DBUG_RETURN(error_num);
#ifndef DBUG_OFF
- dbug_tmp_restore_column_map(table->write_set, tmp_map);
+ dbug_tmp_restore_column_map(&table->write_set, tmp_map);
#endif
} else {
DBUG_PRINT("info", ("spider bitmap is not set %s",
@@ -3380,8 +3380,8 @@ int spider_db_fetch_key(
bitmap_is_set(table->write_set, field->field_index)
)) {
#ifndef DBUG_OFF
- my_bitmap_map *tmp_map =
- dbug_tmp_use_all_columns(table, table->write_set);
+ MY_BITMAP *tmp_map =
+ dbug_tmp_use_all_columns(table, &table->write_set);
#endif
DBUG_PRINT("info", ("spider bitmap is set %s",
SPIDER_field_name_str(field)));
@@ -3389,7 +3389,7 @@ int spider_db_fetch_key(
spider_db_fetch_row(share, field, row, ptr_diff)))
DBUG_RETURN(error_num);
#ifndef DBUG_OFF
- dbug_tmp_restore_column_map(table->write_set, tmp_map);
+ dbug_tmp_restore_column_map(&table->write_set, tmp_map);
#endif
}
row->next();
@@ -3504,15 +3504,15 @@ int spider_db_fetch_minimum_columns(
bitmap_is_set(table->write_set, (*field)->field_index)
)) {
#ifndef DBUG_OFF
- my_bitmap_map *tmp_map =
- dbug_tmp_use_all_columns(table, table->write_set);
+ MY_BITMAP *tmp_map =
+ dbug_tmp_use_all_columns(table, &table->write_set);
#endif
DBUG_PRINT("info", ("spider bitmap is set %s",
SPIDER_field_name_str(*field)));
if ((error_num = spider_db_fetch_row(share, *field, row, ptr_diff)))
DBUG_RETURN(error_num);
#ifndef DBUG_OFF
- dbug_tmp_restore_column_map(table->write_set, tmp_map);
+ dbug_tmp_restore_column_map(&table->write_set, tmp_map);
#endif
}
row->next();
@@ -5670,8 +5670,8 @@ int spider_db_seek_tmp_table(
bitmap_is_set(table->write_set, (*field)->field_index)
)) {
#ifndef DBUG_OFF
- my_bitmap_map *tmp_map =
- dbug_tmp_use_all_columns(table, table->write_set);
+ MY_BITMAP *tmp_map =
+ dbug_tmp_use_all_columns(table, &table->write_set);
#endif
DBUG_PRINT("info", ("spider bitmap is set %s",
SPIDER_field_name_str(*field)));
@@ -5679,7 +5679,7 @@ int spider_db_seek_tmp_table(
spider_db_fetch_row(spider->share, *field, row, ptr_diff)))
DBUG_RETURN(error_num);
#ifndef DBUG_OFF
- dbug_tmp_restore_column_map(table->write_set, tmp_map);
+ dbug_tmp_restore_column_map(&table->write_set, tmp_map);
#endif
}
row->next();
@@ -5758,8 +5758,8 @@ int spider_db_seek_tmp_key(
bitmap_is_set(table->write_set, field->field_index)
)) {
#ifndef DBUG_OFF
- my_bitmap_map *tmp_map =
- dbug_tmp_use_all_columns(table, table->write_set);
+ MY_BITMAP *tmp_map =
+ dbug_tmp_use_all_columns(table, &table->write_set);
#endif
DBUG_PRINT("info", ("spider bitmap is set %s",
SPIDER_field_name_str(field)));
@@ -5767,7 +5767,7 @@ int spider_db_seek_tmp_key(
spider_db_fetch_row(spider->share, field, row, ptr_diff)))
DBUG_RETURN(error_num);
#ifndef DBUG_OFF
- dbug_tmp_restore_column_map(table->write_set, tmp_map);
+ dbug_tmp_restore_column_map(&table->write_set, tmp_map);
#endif
}
row->next();
@@ -5849,8 +5849,8 @@ int spider_db_seek_tmp_minimum_columns(
bitmap_is_set(table->write_set, (*field)->field_index)));
*/
#ifndef DBUG_OFF
- my_bitmap_map *tmp_map =
- dbug_tmp_use_all_columns(table, table->write_set);
+ MY_BITMAP *tmp_map =
+ dbug_tmp_use_all_columns(table, &table->write_set);
#endif
DBUG_PRINT("info", ("spider bitmap is set %s",
SPIDER_field_name_str(*field)));
@@ -5859,7 +5859,7 @@ int spider_db_seek_tmp_minimum_columns(
DBUG_RETURN(error_num);
row->next();
#ifndef DBUG_OFF
- dbug_tmp_restore_column_map(table->write_set, tmp_map);
+ dbug_tmp_restore_column_map(&table->write_set, tmp_map);
#endif
}
else if (bitmap_is_set(table->read_set, (*field)->field_index))
@@ -9668,7 +9668,7 @@ int spider_db_open_item_string(
{
THD *thd = NULL;
TABLE *table;
- my_bitmap_map *saved_map;
+ MY_BITMAP *saved_map;
Time_zone *saved_time_zone;
String str_value;
char tmp_buf[MAX_FIELD_WIDTH];
@@ -9697,7 +9697,7 @@ int spider_db_open_item_string(
*/
table = field->table;
thd = table->in_use;
- saved_map = dbug_tmp_use_all_columns(table, table->write_set);
+ saved_map = dbug_tmp_use_all_columns(table, &table->write_set);
item->save_in_field(field, FALSE);
saved_time_zone = thd->variables.time_zone;
thd->variables.time_zone = UTC;
@@ -9742,7 +9742,7 @@ end:
if (thd)
{
thd->variables.time_zone = saved_time_zone;
- dbug_tmp_restore_column_map(table->write_set, saved_map);
+ dbug_tmp_restore_column_map(&table->write_set, saved_map);
}
}
@@ -9784,7 +9784,7 @@ int spider_db_open_item_int(
{
THD *thd = NULL;
TABLE *table;
- my_bitmap_map *saved_map;
+ MY_BITMAP *saved_map;
Time_zone *saved_time_zone;
String str_value;
bool print_quoted_string;
@@ -9812,7 +9812,7 @@ int spider_db_open_item_int(
*/
table = field->table;
thd = table->in_use;
- saved_map = dbug_tmp_use_all_columns(table, table->write_set);
+ saved_map = dbug_tmp_use_all_columns(table, &table->write_set);
item->save_in_field(field, FALSE);
saved_time_zone = thd->variables.time_zone;
thd->variables.time_zone = UTC;
@@ -9858,7 +9858,7 @@ end:
if (thd)
{
thd->variables.time_zone = saved_time_zone;
- dbug_tmp_restore_column_map(table->write_set, saved_map);
+ dbug_tmp_restore_column_map(&table->write_set, saved_map);
}
}
@@ -10178,8 +10178,8 @@ int spider_db_udf_fetch_table(
DBUG_RETURN(HA_ERR_END_OF_FILE);
#ifndef DBUG_OFF
- my_bitmap_map *tmp_map =
- dbug_tmp_use_all_columns(table, table->write_set);
+ MY_BITMAP *tmp_map =
+ dbug_tmp_use_all_columns(table, &table->write_set);
#endif
for (
roop_count = 0,
@@ -10192,7 +10192,7 @@ int spider_db_udf_fetch_table(
spider_db_udf_fetch_row(trx, *field, row)))
{
#ifndef DBUG_OFF
- dbug_tmp_restore_column_map(table->write_set, tmp_map);
+ dbug_tmp_restore_column_map(&table->write_set, tmp_map);
#endif
DBUG_RETURN(error_num);
}
@@ -10202,7 +10202,7 @@ int spider_db_udf_fetch_table(
for (; roop_count < set_off; roop_count++, field++)
(*field)->set_default();
#ifndef DBUG_OFF
- dbug_tmp_restore_column_map(table->write_set, tmp_map);
+ dbug_tmp_restore_column_map(&table->write_set, tmp_map);
#endif
table->status = 0;
DBUG_RETURN(0);
diff --git a/storage/spider/spd_db_mysql.cc b/storage/spider/spd_db_mysql.cc
index 420f14f9919..86ce0c530b1 100644
--- a/storage/spider/spd_db_mysql.cc
+++ b/storage/spider/spd_db_mysql.cc
@@ -9690,8 +9690,7 @@ int spider_mbase_handler::append_update_set(
mysql_share->append_column_name(str, (*fields)->field_index);
str->q_append(SPIDER_SQL_EQUAL_STR, SPIDER_SQL_EQUAL_LEN);
#ifndef DBUG_OFF
- my_bitmap_map *tmp_map = dbug_tmp_use_all_columns(table,
- table->read_set);
+ MY_BITMAP *tmp_map = dbug_tmp_use_all_columns(table, &table->read_set);
#endif
if (
spider_db_mbase_utility->
@@ -9700,12 +9699,12 @@ int spider_mbase_handler::append_update_set(
str->reserve(SPIDER_SQL_COMMA_LEN)
) {
#ifndef DBUG_OFF
- dbug_tmp_restore_column_map(table->read_set, tmp_map);
+ dbug_tmp_restore_column_map(&table->read_set, tmp_map);
#endif
DBUG_RETURN(HA_ERR_OUT_OF_MEM);
}
#ifndef DBUG_OFF
- dbug_tmp_restore_column_map(table->read_set, tmp_map);
+ dbug_tmp_restore_column_map(&table->read_set, tmp_map);
#endif
}
str->q_append(SPIDER_SQL_COMMA_STR, SPIDER_SQL_COMMA_LEN);
@@ -12426,8 +12425,8 @@ int spider_mbase_handler::append_insert_values(
bitmap_is_set(table->read_set, (*field)->field_index)
) {
#ifndef DBUG_OFF
- my_bitmap_map *tmp_map =
- dbug_tmp_use_all_columns(table, table->read_set);
+ MY_BITMAP *tmp_map =
+ dbug_tmp_use_all_columns(table, &table->read_set);
#endif
add_value = TRUE;
DBUG_PRINT("info",("spider is_null()=%s",
@@ -12449,7 +12448,7 @@ int spider_mbase_handler::append_insert_values(
if (str->reserve(SPIDER_SQL_NULL_LEN + SPIDER_SQL_COMMA_LEN))
{
#ifndef DBUG_OFF
- dbug_tmp_restore_column_map(table->read_set, tmp_map);
+ dbug_tmp_restore_column_map(&table->read_set, tmp_map);
#endif
str->length(0);
DBUG_RETURN(HA_ERR_OUT_OF_MEM);
@@ -12463,7 +12462,7 @@ int spider_mbase_handler::append_insert_values(
str->reserve(SPIDER_SQL_COMMA_LEN)
) {
#ifndef DBUG_OFF
- dbug_tmp_restore_column_map(table->read_set, tmp_map);
+ dbug_tmp_restore_column_map(&table->read_set, tmp_map);
#endif
str->length(0);
DBUG_RETURN(HA_ERR_OUT_OF_MEM);
@@ -12471,7 +12470,7 @@ int spider_mbase_handler::append_insert_values(
}
str->q_append(SPIDER_SQL_COMMA_STR, SPIDER_SQL_COMMA_LEN);
#ifndef DBUG_OFF
- dbug_tmp_restore_column_map(table->read_set, tmp_map);
+ dbug_tmp_restore_column_map(&table->read_set, tmp_map);
#endif
}
}
diff --git a/storage/tokudb/ha_tokudb.cc b/storage/tokudb/ha_tokudb.cc
index 3734233552d..d36f7624adf 100644
--- a/storage/tokudb/ha_tokudb.cc
+++ b/storage/tokudb/ha_tokudb.cc
@@ -2313,7 +2313,7 @@ int ha_tokudb::pack_row_in_buff(
int r = ENOSYS;
memset((void *) row, 0, sizeof(*row));
- my_bitmap_map *old_map = dbug_tmp_use_all_columns(table, table->write_set);
+ MY_BITMAP *old_map = dbug_tmp_use_all_columns(table, &table->write_set);
// Copy null bytes
memcpy(row_buff, record, table_share->null_bytes);
@@ -2362,7 +2362,7 @@ int ha_tokudb::pack_row_in_buff(
row->size = (size_t) (var_field_data_ptr - row_buff);
r = 0;
- dbug_tmp_restore_column_map(table->write_set, old_map);
+ dbug_tmp_restore_column_map(&table->write_set, old_map);
return r;
}
@@ -2758,7 +2758,7 @@ DBT* ha_tokudb::create_dbt_key_from_key(
{
uint32_t size = 0;
uchar* tmp_buff = buff;
- my_bitmap_map *old_map = dbug_tmp_use_all_columns(table, table->write_set);
+ MY_BITMAP *old_map = dbug_tmp_use_all_columns(table, &table->write_set);
key->data = buff;
@@ -2797,7 +2797,7 @@ DBT* ha_tokudb::create_dbt_key_from_key(
key->size = size;
DBUG_DUMP("key", (uchar *) key->data, key->size);
- dbug_tmp_restore_column_map(table->write_set, old_map);
+ dbug_tmp_restore_column_map(&table->write_set, old_map);
return key;
}
@@ -2890,7 +2890,7 @@ DBT* ha_tokudb::pack_key(
KEY* key_info = &table->key_info[keynr];
KEY_PART_INFO* key_part = key_info->key_part;
KEY_PART_INFO* end = key_part + key_info->user_defined_key_parts;
- my_bitmap_map* old_map = dbug_tmp_use_all_columns(table, table->write_set);
+ MY_BITMAP* old_map = dbug_tmp_use_all_columns(table, &table->write_set);
memset((void *) key, 0, sizeof(*key));
key->data = buff;
@@ -2927,7 +2927,7 @@ DBT* ha_tokudb::pack_key(
key->size = (buff - (uchar *) key->data);
DBUG_DUMP("key", (uchar *) key->data, key->size);
- dbug_tmp_restore_column_map(table->write_set, old_map);
+ dbug_tmp_restore_column_map(&table->write_set, old_map);
DBUG_RETURN(key);
}
@@ -2955,7 +2955,7 @@ DBT* ha_tokudb::pack_ext_key(
KEY* key_info = &table->key_info[keynr];
KEY_PART_INFO* key_part = key_info->key_part;
KEY_PART_INFO* end = key_part + key_info->user_defined_key_parts;
- my_bitmap_map* old_map = dbug_tmp_use_all_columns(table, table->write_set);
+ MY_BITMAP* old_map = dbug_tmp_use_all_columns(table, &table->write_set);
memset((void *) key, 0, sizeof(*key));
key->data = buff;
@@ -3034,7 +3034,7 @@ DBT* ha_tokudb::pack_ext_key(
key->size = (buff - (uchar *) key->data);
DBUG_DUMP("key", (uchar *) key->data, key->size);
- dbug_tmp_restore_column_map(table->write_set, old_map);
+ dbug_tmp_restore_column_map(&table->write_set, old_map);
DBUG_RETURN(key);
}
#endif // defined(TOKU_INCLUDE_EXTENDED_KEYS) && TOKU_INCLUDE_EXTENDED_KEYS
diff --git a/storage/tokudb/mysql-test/tokudb/r/type_decimal.result b/storage/tokudb/mysql-test/tokudb/r/type_decimal.result
index 3b82bbcef4f..c01edef283e 100644
--- a/storage/tokudb/mysql-test/tokudb/r/type_decimal.result
+++ b/storage/tokudb/mysql-test/tokudb/r/type_decimal.result
@@ -177,9 +177,8 @@ Note 1265 Data truncated for column 'a' at row 2
insert into t1 values ("1e+18446744073709551615"),("1e+18446744073709551616"),("1e-9223372036854775807"),("1e-9223372036854775809");
Warnings:
Warning 1264 Out of range value for column 'a' at row 1
-Warning 1366 Incorrect decimal value: '1e+18446744073709551616' for column `test`.`t1`.`a` at row 2
+Warning 1264 Out of range value for column 'a' at row 2
Note 1265 Data truncated for column 'a' at row 3
-Warning 1366 Incorrect decimal value: '1e-9223372036854775809' for column `test`.`t1`.`a` at row 4
insert into t1 values ("123.4e"),("123.4e+2"),("123.4e-2"),("123e1"),("123e+0");
Warnings:
Warning 1265 Data truncated for column 'a' at row 1
@@ -210,7 +209,7 @@ a
99999999.99
0.00
99999999.99
-0.00
+99999999.99
0.00
0.00
123.40
diff --git a/strings/ctype-simple.c b/strings/ctype-simple.c
index 1ce180e30e4..9c6cb34137d 100644
--- a/strings/ctype-simple.c
+++ b/strings/ctype-simple.c
@@ -1795,9 +1795,10 @@ ret_sign:
{
if (negative)
{
- if (ull > (ulonglong) LONGLONG_MIN)
+ if (ull >= (ulonglong) LONGLONG_MIN)
{
- *error= MY_ERRNO_ERANGE;
+ if (ull != (ulonglong) LONGLONG_MIN)
+ *error= MY_ERRNO_ERANGE;
return (ulonglong) LONGLONG_MIN;
}
*error= 0;
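
Reviewer note: the ctype-simple.c hunk tightens the negative-overflow boundary. A magnitude equal to |LONGLONG_MIN| (2^63) is representable and must not be flagged as out of range; only magnitudes strictly above it are. A small standalone sketch of that rule, using <climits> names rather than the server's LONGLONG_MIN/MY_ERRNO_ERANGE:

    #include <climits>
    #include <cstdio>

    /* Clamp a parsed magnitude to a negative long long, flagging overflow
       only when the magnitude exceeds 2^63, i.e. |LLONG_MIN|. */
    static long long negative_from_magnitude(unsigned long long magnitude, int *error)
    {
      if (magnitude >= (unsigned long long) LLONG_MIN)   /* 2^63 after conversion */
      {
        *error = (magnitude != (unsigned long long) LLONG_MIN);
        return LLONG_MIN;
      }
      *error = 0;
      return -(long long) magnitude;
    }

    int main()
    {
      int err;
      long long v = negative_from_magnitude(9223372036854775808ULL, &err);
      printf("%lld err=%d\n", v, err);   /* LLONG_MIN, err=0: the exact minimum is legal */
      v = negative_from_magnitude(9223372036854775809ULL, &err);
      printf("%lld err=%d\n", v, err);   /* clamped to LLONG_MIN, err=1: real overflow */
      return 0;
    }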
diff --git a/strings/ctype-uca.c b/strings/ctype-uca.c
index 312b903ea64..a519287c0e4 100644
--- a/strings/ctype-uca.c
+++ b/strings/ctype-uca.c
@@ -31467,9 +31467,11 @@ static inline uint16 *
my_uca_contraction_weight(const MY_CONTRACTIONS *list, my_wc_t *wc, size_t len)
{
MY_CONTRACTION *c, *last;
+ DBUG_ASSERT(len <= MY_UCA_MAX_CONTRACTION);
+
for (c= list->item, last= c + list->nitems; c < last; c++)
{
- if ((len == MY_UCA_MAX_CONTRACTION || c->ch[len] == 0) &&
+ if ((len >= MY_UCA_MAX_CONTRACTION || c->ch[len] == 0) &&
!c->with_context &&
!my_wmemcmp(c->ch, wc, len))
return c->weight;
@@ -33212,7 +33214,8 @@ my_char_weight_put(MY_UCA_WEIGHT_LEVEL *dst,
for (chlen= len; chlen > 1; chlen--)
{
- if ((from= my_uca_contraction_weight(&dst->contractions, str, chlen)))
+ if (chlen <= MY_UCA_MAX_CONTRACTION &&
+ (from= my_uca_contraction_weight(&dst->contractions, str, chlen)))
{
str+= chlen;
len-= chlen;
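
Reviewer note: the ctype-uca.c change is a bounds fix. c->ch[] holds at most MY_UCA_MAX_CONTRACTION code points, so probing c->ch[len] is only valid while len stays below that limit; the caller now skips longer candidates and the function asserts the invariant. A minimal sketch of the guarded probe, with a hypothetical limit value:

    #include <cassert>
    #include <cstddef>

    static const size_t kMaxContraction = 6;   /* hypothetical stand-in for MY_UCA_MAX_CONTRACTION */

    /* A stored contraction shorter than the maximum is 0-terminated; one of
       exactly kMaxContraction code points is not, so the ch[len] probe must
       be skipped in that case instead of reading past the array. */
    static bool contraction_length_matches(const unsigned *ch, size_t len)
    {
      assert(len <= kMaxContraction);
      return len >= kMaxContraction || ch[len] == 0;
    }

    int main()
    {
      unsigned two_cp[kMaxContraction] = { 0x41, 0x300, 0, 0, 0, 0 };
      return contraction_length_matches(two_cp, 2) ? 0 : 1;   /* matches: ch[2] == 0 */
    }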
diff --git a/strings/ctype-ucs2.c b/strings/ctype-ucs2.c
index 586289c51fd..0c153793e8e 100644
--- a/strings/ctype-ucs2.c
+++ b/strings/ctype-ucs2.c
@@ -1162,9 +1162,12 @@ static size_t
my_snprintf_mb2(CHARSET_INFO *cs __attribute__((unused)),
char* to, size_t n, const char* fmt, ...)
{
+ size_t ret;
va_list args;
va_start(args,fmt);
- return my_vsnprintf_mb2(to, n, fmt, args);
+ ret= my_vsnprintf_mb2(to, n, fmt, args);
+ va_end(args);
+ return ret;
}
@@ -2391,9 +2394,12 @@ static size_t
my_snprintf_utf32(CHARSET_INFO *cs __attribute__((unused)),
char* to, size_t n, const char* fmt, ...)
{
+ size_t ret;
va_list args;
va_start(args,fmt);
- return my_vsnprintf_utf32(to, n, fmt, args);
+ ret= my_vsnprintf_utf32(to, n, fmt, args);
+ va_end(args);
+ return ret;
}
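
Reviewer note: both ctype-ucs2.c hunks fix the same va_list leak. Returning straight out of the wrapper skipped va_end(), which the C standard requires after every va_start(). The corrected shape, sketched with plain vsnprintf standing in for the charset-specific formatter:

    #include <cstdarg>
    #include <cstdio>

    static size_t snprintf_wrapper_demo(char *to, size_t n, const char *fmt, ...)
    {
      size_t ret;
      va_list args;
      va_start(args, fmt);
      ret = (size_t) vsnprintf(to, n, fmt, args);  /* stand-in for my_vsnprintf_mb2/utf32 */
      va_end(args);                                /* must run before returning */
      return ret;
    }

    int main()
    {
      char buf[32];
      snprintf_wrapper_demo(buf, sizeof(buf), "%d-%s", 42, "ok");
      puts(buf);
      return 0;
    }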
diff --git a/strings/decimal.c b/strings/decimal.c
index 9dae3d987f2..16bc887814a 100644
--- a/strings/decimal.c
+++ b/strings/decimal.c
@@ -921,20 +921,75 @@ internal_str2dec(const char *from, decimal_t *to, char **end, my_bool fixed)
if (endp+1 < end_of_string && (*endp == 'e' || *endp == 'E'))
{
int str_error;
- longlong exponent= my_strtoll10(endp+1, (char**) &end_of_string,
+ const char *end_of_exponent= end_of_string;
+ longlong exponent= my_strtoll10(endp+1, (char**) &end_of_exponent,
&str_error);
- if (end_of_string != endp +1) /* If at least one digit */
+ if (end_of_exponent != endp +1) /* If at least one digit */
{
- *end= (char*) end_of_string;
+ *end= (char*) end_of_exponent;
if (str_error > 0)
{
+ if (str_error == MY_ERRNO_ERANGE)
+ {
+ /*
+ Exponent is:
+ - a huge positive number that does not fit into ulonglong
+ - a huge negative number that does not fit into longlong
+ Skip all remaining digits.
+ */
+ for ( ; end_of_exponent < end_of_string &&
+ my_isdigit(&my_charset_latin1, *end_of_exponent)
+ ; end_of_exponent++)
+ { }
+ *end= (char*) end_of_exponent;
+ if (exponent == ~0)
+ {
+ if (!decimal_is_zero(to))
+ {
+ /*
+ Non-zero mantissa and a huge positive exponent that
+ does not fit into ulonglong, e.g.:
+ 1e111111111111111111111
+ */
+ error= E_DEC_OVERFLOW;
+ }
+ else
+ {
+ /*
+ Zero mantissa and a huge positive exponent that
+ does not fit into ulonglong, e.g.:
+ 0e111111111111111111111
+ Return zero without warnings.
+ */
+ }
+ }
+ else
+ {
+ /*
+ Huge negative exponent that does not fit into longlong, e.g.
+ 1e-111111111111111111111
+ 0e-111111111111111111111
+ Return zero without warnings.
+ */
+ }
+ goto fatal_error;
+ }
+
+ /*
+ Some other error, e.g. MY_ERRNO_EDOM
+ */
error= E_DEC_BAD_NUM;
goto fatal_error;
}
if (exponent > INT_MAX/2 || (str_error == 0 && exponent < 0))
{
- error= E_DEC_OVERFLOW;
+ /*
+ The exponent fits into ulonglong, but it's still huge, e.g.
+ 1e1111111111
+ */
+ if (!decimal_is_zero(to))
+ error= E_DEC_OVERFLOW;
goto fatal_error;
}
if (exponent < INT_MIN/2 && error != E_DEC_OVERFLOW)
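
Reviewer note: the decimal.c hunk changes how huge exponents are reported. The remaining exponent digits are consumed so parsing stops at the right place, and E_DEC_OVERFLOW is raised only for a non-zero mantissa, since zero times any power of ten is still an exact zero and a huge negative exponent simply underflows to zero. A toy classifier of the intended outcomes (illustration only, not the library code):

    #include <cstdio>

    enum DemoResult { DEMO_ZERO_OK, DEMO_OVERFLOW };

    /* Mirrors the new rule for exponents too large for the integer parser. */
    static DemoResult classify(bool mantissa_is_zero, bool exponent_is_positive)
    {
      if (exponent_is_positive && !mantissa_is_zero)
        return DEMO_OVERFLOW;          /* e.g. 1e111111111111111111111 */
      return DEMO_ZERO_OK;             /* 0e+huge and anything e-huge read as 0 */
    }

    int main()
    {
      printf("%d\n", classify(false, true));   /* 1e+huge  -> overflow */
      printf("%d\n", classify(true,  true));   /* 0e+huge  -> zero, no warning */
      printf("%d\n", classify(false, false));  /* 1e-huge  -> zero, no warning */
      return 0;
    }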
diff --git a/strings/json_lib.c b/strings/json_lib.c
index 83d5fdaa016..7265afdf355 100644
--- a/strings/json_lib.c
+++ b/strings/json_lib.c
@@ -933,6 +933,7 @@ int json_read_value(json_engine_t *j)
{
int t_next, c_len, res;
+ j->value_type= JSON_VALUE_UNINITALIZED;
if (j->state == JST_KEY)
{
while (json_read_keyname_chr(j) == 0) {}
diff --git a/support-files/mariadb.pc.in b/support-files/mariadb.pc.in
index 7d321f28491..500e148d0ed 100644
--- a/support-files/mariadb.pc.in
+++ b/support-files/mariadb.pc.in
@@ -3,11 +3,12 @@ prefix=@CMAKE_INSTALL_PREFIX@
exec_prefix=${prefix}
libdir=${prefix}/@INSTALL_LIBDIR@
includedir=${prefix}/@INSTALL_INCLUDEDIR@
-# those are rarely present or not at all, but we export them regardless
+# It's common to want to know where to install it.
+plugindir=${prefix}/@INSTALL_PLUGINDIR@
+# Below are rarely present or not at all, but we export them regardless
bindir=${prefix}/@INSTALL_BINDIR@
sbindir=${prefix}/@INSTALL_SBINDIR@
scriptdir=${prefix}/@INSTALL_SCRIPTDIR@
-plugindir=${prefix}/@INSTALL_PLUGINDIR@
docdir=${prefix}/@INSTALL_DOCDIR@
mandir=${prefix}/@INSTALL_MANDIR@
sharedir=${prefix}/@INSTALL_SHAREDIR@
diff --git a/support-files/mariadb.service.in b/support-files/mariadb.service.in
index 3dc6f0fbc71..ca9262dd211 100644
--- a/support-files/mariadb.service.in
+++ b/support-files/mariadb.service.in
@@ -145,7 +145,7 @@ TimeoutStopSec=900
##
# Number of files limit. previously [mysqld_safe] open-files-limit
-LimitNOFILE=16384
+LimitNOFILE=32768
# Maximum core size. previously [mysqld_safe] core-file-size
# LimitCore=
diff --git a/support-files/mariadb@.service.in b/support-files/mariadb@.service.in
index f4f0cd8c7f2..af99f0f3c5a 100644
--- a/support-files/mariadb@.service.in
+++ b/support-files/mariadb@.service.in
@@ -268,7 +268,7 @@ Group=mysql
##
# Number of files limit. previously [mysqld_safe] open-files-limit
-LimitNOFILE=16384
+LimitNOFILE=32768
# Maximum core size. previously [mysqld_safe] core-file-size
# LimitCore=
diff --git a/support-files/rpm/server-postin.sh b/support-files/rpm/server-postin.sh
index db249c326a6..61c417e3e7d 100644
--- a/support-files/rpm/server-postin.sh
+++ b/support-files/rpm/server-postin.sh
@@ -40,7 +40,7 @@ if [ $1 = 1 ] ; then
# Create a MySQL user and group. Do not report any problems if it already
# exists.
groupadd -r %{mysqld_group} 2> /dev/null || true
- useradd -M -r --home $datadir --shell /sbin/nologin --comment "MySQL server" --gid %{mysqld_group} %{mysqld_user} 2> /dev/null || true
+ useradd -M -r --home $datadir --shell /sbin/nologin --comment "MySQL server" --gid %{mysqld_group} %{mysqld_user} 2> /dev/null || true
# The user may already exist, make sure it has the proper group nevertheless (BUG#12823)
usermod --gid %{mysqld_group} %{mysqld_user} 2> /dev/null || true
@@ -81,7 +81,7 @@ if [ -x /usr/sbin/semodule ] ; then
/usr/sbin/semodule -i /usr/share/mysql/policy/selinux/mariadb.pp
fi
-if [ -x sbin/restorecon ] ; then
- sbin/restorecon -R var/lib/mysql
+if [ -x /sbin/restorecon ] ; then
+ /sbin/restorecon -R /var/lib/mysql
fi
diff --git a/support-files/rpm/server-posttrans.sh b/support-files/rpm/server-posttrans.sh
index 0845a68c791..313274c6140 100644
--- a/support-files/rpm/server-posttrans.sh
+++ b/support-files/rpm/server-posttrans.sh
@@ -1,10 +1,15 @@
if [ -r %{restart_flag} ] ; then
rm %{restart_flag}
- # only restart the server if it was alredy running
+ # only restart the server if it was already running
if [ -x /usr/bin/systemctl ] ; then
/usr/bin/systemctl daemon-reload > /dev/null 2>&1
- /usr/bin/systemctl try-restart mariadb.service > /dev/null 2>&1
+ if [ /usr/bin/systemctl is-active mysql ]; then
+ /usr/bin/systemctl restart mysql > /dev/null 2>&1
+ else
+ /usr/bin/systemctl try-restart mariadb.service > /dev/null 2>&1
+ fi
+ # not a systemd-enabled environment, use SysV startup script
elif %{_sysconfdir}/init.d/mysql status > /dev/null 2>&1; then
- %{_sysconfdir}/init.d/mysql restart
+ %{_sysconfdir}/init.d/mysql restart > /dev/null 2>&1
fi
fi
diff --git a/tests/grant.pl b/tests/grant.pl
deleted file mode 100755
index b50481a93fc..00000000000
--- a/tests/grant.pl
+++ /dev/null
@@ -1,750 +0,0 @@
-#!/usr/bin/env perl
-
-# Copyright (c) 2000, 2005 MySQL AB, 2009 Sun Microsystems, Inc.
-# Use is subject to license terms.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1335 USA
-
-#
-# Testing of grants.
-# Note that this will delete all table and column grants !
-#
-
-use DBI;
-use Getopt::Long;
-use strict;
-
-use vars qw($dbh $user_dbh $opt_help $opt_Information $opt_force $opt_debug
- $opt_verbose $opt_server $opt_root_user $opt_password $opt_user
- $opt_database $opt_host $version $user $tables_cols $columns_cols
- $tmp_table $opt_silent);
-
-$version="1.1";
-$opt_help=$opt_Information=$opt_force=$opt_debug=$opt_verbose=$opt_silent=0;
-$opt_host="localhost",
-$opt_server="mysql";
-$opt_root_user="root";
-$opt_password="";
-$opt_user="grant_user";
-$opt_database="grant_test";
-
-GetOptions("Information","help","server=s","root-user=s","password=s","user","database=s","force","host=s","debug","verbose","silent") || usage();
-usage() if ($opt_help || $opt_Information);
-
-$user="$opt_user\@$opt_host";
-
-if (!$opt_force)
-{
- print_info()
-}
-
-$|=1;
-
-$tables_cols="Host, Db, User, Table_name, Grantor, Table_priv, Column_priv";
-$columns_cols="Host, Db, User, Table_name, Column_name, Column_priv";
-$tmp_table="/tmp/mysql-grant.test"; # Can't use $$ as we are logging result
-unlink($tmp_table);
-
-#
-# clear grant tables
-#
-
-$dbh = DBI->connect("DBI:mysql:mysql:$opt_host",
- $opt_root_user,$opt_password,
- { PrintError => 0}) || die "Can't connect to mysql server with user '$opt_root_user': $DBI::errstr\n";
-
-safe_query("delete from user where user='$opt_user' or user='${opt_user}2'");
-safe_query("delete from db where user='$opt_user'");
-safe_query("delete from tables_priv");
-safe_query("delete from columns_priv");
-safe_query("lock tables mysql.user write"); # Test lock tables
-safe_query("flush privileges");
-safe_query("unlock tables"); # should already be unlocked
-safe_query("drop database $opt_database",3); # Don't print possible error
-safe_query("create database $opt_database");
-
-# check that the user can't login yet
-
-user_connect(1);
-#goto test;
-
-#
-# Enable column grant code
-#
-safe_query("grant select(user) on mysql.user to $user");
-safe_query("revoke select(user) on mysql.user from $user");
-
-#
-# Test grants on user level
-#
-
-safe_query("grant select on *.* to $user");
-safe_query("set password FOR ${opt_user}2\@$opt_host = password('test')",1);
-safe_query("set password FOR $opt_user\@$opt_host=password('test')");
-user_connect(1);
-safe_query("set password FOR $opt_user\@$opt_host=''");
-user_connect(0);
-user_query("select * from mysql.user where user = '$opt_user'");
-user_query("select * from mysql.db where user = '$opt_user'");
-safe_query("grant select on *.* to $user,$user");
-safe_query("show grants for $user");
-user_connect(0);
-
-# The following should fail
-user_query("insert into mysql.user (host,user) values ('error','$opt_user')",1);
-user_query("update mysql.user set host='error' WHERE user='$opt_user'",1);
-user_query("create table $opt_database.test (a int,b int)",1);
-user_query("grant select on *.* to ${opt_user}2\@$opt_host",1);
-safe_query("revoke select on $opt_database.test from $opt_user\@opt_host",1);
-safe_query("revoke select on $opt_database.* from $opt_user\@opt_host",1);
-safe_query("revoke select on *.* from $opt_user",1);
-safe_query("grant select on $opt_database.not_exists to $opt_user",1);
-safe_query("grant FILE on $opt_database.test to $opt_user",1);
-safe_query("grant select on *.* to wrong___________user_name",1);
-safe_query("grant select on $opt_database.* to wrong___________user_name",1);
-user_connect(0);
-user_query("grant select on $opt_database.test to $opt_user with grant option",1);
-safe_query("set password FOR ''\@''=''",1);
-user_query("set password FOR root\@$opt_host = password('test')",1);
-
-# Change privileges for user
-safe_query("revoke select on *.* from $user");
-safe_query("grant create,update on *.* to $user");
-user_connect(0);
-safe_query("flush privileges");
-user_query("create table $opt_database.test (a int,b int)");
-user_query("update $opt_database.test set b=b+1 where a > 0",1);
-safe_query("show grants for $user");
-safe_query("revoke update on *.* from $user");
-user_connect(0);
-safe_query("grant select(c) on $opt_database.test to $user",1);
-safe_query("revoke select(c) on $opt_database.test from $user",1);
-safe_query("grant select on $opt_database.test to wrong___________user_name",1);
-user_query("INSERT INTO $opt_database.test values (2,0)",1);
-
-safe_query("grant ALL PRIVILEGES on *.* to $user");
-safe_query("REVOKE INSERT on *.* from $user");
-user_connect(0);
-user_query("INSERT INTO $opt_database.test values (1,0)",1);
-safe_query("grant INSERT on *.* to $user");
-user_connect(0);
-user_query("INSERT INTO $opt_database.test values (2,0)");
-user_query("select count(*) from $opt_database.test");
-safe_query("revoke SELECT on *.* from $user");
-user_connect(0);
-user_query("select count(*) from $opt_database.test",1);
-user_query("INSERT INTO $opt_database.test values (3,0)");
-safe_query("grant SELECT on *.* to $user");
-user_connect(0);
-user_query("select count(*) from $opt_database.test");
-safe_query("revoke ALL PRIVILEGES on *.* from $user");
-user_connect(1);
-safe_query("delete from user where user='$opt_user'");
-safe_query("flush privileges");
-if (0) # Only if no anonymous user on localhost.
-{
- safe_query("grant select on *.* to $opt_user");
- user_connect(0);
- safe_query("revoke select on *.* from $opt_user");
- user_connect(1);
-}
-safe_query("delete from user where user='$opt_user'");
-safe_query("flush privileges");
-
-#
-# Test grants on database level
-#
-safe_query("grant select on $opt_database.* to $user");
-safe_query("select * from mysql.user where user = '$opt_user'");
-safe_query("select * from mysql.db where user = '$opt_user'");
-user_connect(0);
-user_query("select count(*) from $opt_database.test");
-# The following should fail
-user_query("select * from mysql.user where user = '$opt_user'",1);
-user_query("insert into $opt_database.test values (4,0)",1);
-user_query("update $opt_database.test set a=1",1);
-user_query("delete from $opt_database.test",1);
-user_query("create table $opt_database.test2 (a int)",1);
-user_query("ALTER TABLE $opt_database.test add c int",1);
-user_query("CREATE INDEX dummy ON $opt_database.test (a)",1);
-user_query("drop table $opt_database.test",1);
-user_query("grant ALL PRIVILEGES on $opt_database.* to ${opt_user}2\@$opt_host",1);
-
-# Change privileges for user
-safe_query("grant ALL PRIVILEGES on $opt_database.* to $user WITH GRANT OPTION");
-user_connect(0);
-user_query("insert into $opt_database.test values (5,0)");
-safe_query("REVOKE ALL PRIVILEGES on * from $user",1);
-safe_query("REVOKE ALL PRIVILEGES on *.* from $user");
-safe_query("REVOKE ALL PRIVILEGES on $opt_database.* from $user");
-safe_query("REVOKE ALL PRIVILEGES on $opt_database.* from $user");
-user_connect(0);
-user_query("insert into $opt_database.test values (6,0)",1);
-safe_query("REVOKE GRANT OPTION on $opt_database.* from $user");
-user_connect(1);
-safe_query("grant ALL PRIVILEGES on $opt_database.* to $user");
-
-user_connect(0);
-user_query("select * from mysql.user where user = '$opt_user'",1);
-user_query("insert into $opt_database.test values (7,0)");
-user_query("update $opt_database.test set a=3 where a=2");
-user_query("delete from $opt_database.test where a=3");
-user_query("create table $opt_database.test2 (a int not null)");
-user_query("alter table $opt_database.test2 add b int");
-user_query("create index dummy on $opt_database.test2 (a)");
-user_query("update test,test2 SET test.a=test2.a where test.a=test2.a");
-user_query("drop table $opt_database.test2");
-user_query("show tables from grant_test");
-# These should fail
-user_query("insert into mysql.user (host,user) values ('error','$opt_user',0)",1);
-
-# Revoke database privileges
-safe_query("revoke ALL PRIVILEGES on $opt_database.* from $user");
-safe_query("select * from mysql.user where user = '$opt_user'");
-safe_query("select * from mysql.db where user = '$opt_user'");
-
-# Test multi-updates
-safe_query("grant CREATE,UPDATE,DROP on $opt_database.* to $user");
-user_connect(0);
-user_query("create table $opt_database.test2 (a int not null)");
-user_query("update test,test2 SET test.a=1 where 1",1);
-user_query("update test,test2 SET test.a=test2.a where 1",1);
-safe_query("grant SELECT on $opt_database.* to $user");
-user_connect(0);
-user_query("update test,test2 SET test.a=test2.a where test2.a=test.a");
-user_query("drop table $opt_database.test2");
-
-# Revoke database privileges
-safe_query("revoke ALL PRIVILEGES on $opt_database.* from $user");
-user_connect(1);
-
-#
-# Test of grants on table level
-#
-
-safe_query("grant create on $opt_database.test2 to $user");
-user_connect(0);
-user_query("create table $opt_database.test2 (a int not null)");
-user_query("show tables"); # Should only show test, not test2
-user_query("show columns from test",1);
-user_query("show keys from test",1);
-user_query("show columns from test2");
-user_query("show keys from test2");
-user_query("select * from test",1);
-safe_query("grant insert on $opt_database.test to $user");
-user_query("show tables");
-user_query("insert into $opt_database.test values (8,0)");
-user_query("update $opt_database.test set b=1",1);
-safe_query("grant update on $opt_database.test to $user");
-user_query("update $opt_database.test set b=2");
-
-user_query("update $opt_database.test,test2 SET test.b=3",1);
-safe_query("grant select on $opt_database.test2 to $user");
-user_query("update $opt_database.test,test2 SET test.b=3");
-safe_query("revoke select on $opt_database.test2 from $user");
-
-user_query("delete from $opt_database.test",1);
-safe_query("grant delete on $opt_database.test to $user");
-user_query("delete from $opt_database.test where a=1",1);
-user_query("update $opt_database.test set b=3 where b=1",1);
-user_query("update $opt_database.test set b=b+1",1);
-user_query("update $opt_database.test,test2 SET test.a=test2.a",1);
-
-#
-# Test global SELECT privilege combined with table level privileges
-#
-
-safe_query("grant SELECT on *.* to $user");
-user_connect(0);
-user_query("update $opt_database.test set b=b+1");
-user_query("update $opt_database.test set b=b+1 where a > 0");
-user_query("update $opt_database.test,test2 SET test.a=test2.a");
-user_query("update $opt_database.test,test2 SET test2.a=test.a",1);
-safe_query("revoke SELECT on *.* from $user");
-safe_query("grant SELECT on $opt_database.* to $user");
-user_connect(0);
-user_query("update $opt_database.test set b=b+1");
-user_query("update $opt_database.test set b=b+1 where a > 0");
-safe_query("grant UPDATE on *.* to $user");
-user_connect(0);
-user_query("update $opt_database.test set b=b+1");
-user_query("update $opt_database.test set b=b+1 where a > 0");
-safe_query("revoke UPDATE on *.* from $user");
-safe_query("revoke SELECT on $opt_database.* from $user");
-user_connect(0);
-user_query("update $opt_database.test set b=b+1 where a > 0",1);
-user_query("update $opt_database.test set b=b+1",1);
-
-# Add one privilege at a time until the user has all privileges
-user_query("select * from test",1);
-safe_query("grant select on $opt_database.test to $user");
-user_query("delete from $opt_database.test where a=1");
-user_query("update $opt_database.test set b=2 where b=1");
-user_query("update $opt_database.test set b=b+1");
-user_query("select count(*) from test");
-user_query("update test,test2 SET test.b=4",1);
-user_query("update test,test2 SET test2.a=test.a",1);
-user_query("update test,test2 SET test.a=test2.a",1);
-
-user_query("create table $opt_database.test3 (a int)",1);
-user_query("alter table $opt_database.test2 add c int",1);
-safe_query("grant alter on $opt_database.test2 to $user");
-user_query("alter table $opt_database.test2 add c int");
-user_query("create index dummy ON $opt_database.test (a)",1);
-safe_query("grant index on $opt_database.test2 to $user");
-user_query("create index dummy ON $opt_database.test2 (a)");
-user_query("insert into test2 SELECT a,a from test",1);
-safe_query("grant insert on test2 to $user",1); # No table: mysql.test2
-safe_query("grant insert(a) on $opt_database.test2 to $user");
-user_query("insert into test2 SELECT a,a from test",1);
-safe_query("grant insert(c) on $opt_database.test2 to $user");
-user_query("insert into test2 SELECT a,a from test");
-user_query("select count(*) from test2,test",1);
-user_query("select count(*) from test,test2",1);
-user_query("replace into test2 SELECT a from test",1);
-safe_query("grant update on $opt_database.test2 to $user");
-user_query("update test,test2 SET test2.a=test.a");
-user_query("update test,test2 SET test.b=test2.a where 0",1);
-user_query("update test,test2 SET test.a=2 where test2.a>100",1);
-user_query("update test,test2 SET test.a=test2.a",1);
-user_query("replace into test2 SELECT a,a from test",1);
-safe_query("grant DELETE on $opt_database.test2 to $user");
-user_query("replace into test2 SELECT a,a from test");
-user_query("insert into test (a) SELECT a from test2",1);
-safe_query("grant SELECT on $opt_database.test2 to $user");
-user_query("update test,test2 SET test.b=test2.a where 0");
-user_query("update test,test2 SET test.a=test2.a where test2.a>100");
-
-safe_query("revoke UPDATE on $opt_database.test2 from $user");
-safe_query("grant UPDATE (c) on $opt_database.test2 to $user");
-user_query("update test,test2 SET test.b=test2.a where 0");
-user_query("update test,test2 SET test.a=test2.a where test2.a>100");
-user_query("update test,test2 SET test2.a=test2.a where test2.a>100",1);
-user_query("update test,test2 SET test2.c=test2.a where test2.a>100");
-
-safe_query("revoke SELECT,UPDATE on $opt_database.test2 from $user");
-safe_query("grant UPDATE on $opt_database.test2 to $user");
-
-user_query("drop table $opt_database.test2",1);
-user_query("grant select on $opt_database.test2 to $user with grant option",1);
-safe_query("grant drop on $opt_database.test2 to $user with grant option");
-user_query("grant drop on $opt_database.test2 to $user with grant option");
-user_query("grant select on $opt_database.test2 to $user with grant option",1);
-
-# check rename privileges
-user_query("rename table $opt_database.test2 to $opt_database.test3",1);
-safe_query("grant CREATE,DROP on $opt_database.test3 to $user");
-user_query("rename table $opt_database.test2 to $opt_database.test3",1);
-user_query("create table $opt_database.test3 (a int)");
-safe_query("grant INSERT on $opt_database.test3 to $user");
-user_query("drop table $opt_database.test3");
-user_query("rename table $opt_database.test2 to $opt_database.test3");
-user_query("rename table $opt_database.test3 to $opt_database.test2",1);
-safe_query("grant ALTER on $opt_database.test3 to $user");
-user_query("rename table $opt_database.test3 to $opt_database.test2");
-safe_query("revoke DROP on $opt_database.test2 from $user");
-user_query("rename table $opt_database.test2 to $opt_database.test3");
-user_query("drop table if exists $opt_database.test2,$opt_database.test3",1);
-safe_query("drop table if exists $opt_database.test2,$opt_database.test3");
-
-# Check that the user doesn't have some user privileges
-user_query("create database $opt_database",1);
-user_query("drop database $opt_database",1);
-user_query("flush tables",1);
-safe_query("flush privileges");
-
-safe_query("select $tables_cols from mysql.tables_priv");
-safe_query("revoke ALL PRIVILEGES on $opt_database.test from $user");
-safe_query("revoke ALL PRIVILEGES on $opt_database.test2 from $user");
-safe_query("revoke ALL PRIVILEGES on $opt_database.test3 from $user");
-safe_query("revoke GRANT OPTION on $opt_database.test2 from $user");
-safe_query("select $tables_cols from mysql.tables_priv");
-user_query("select count(a) from test",1);
-
-#
-# Test some grants on column level
-#
-
-safe_query("grant create,update on $opt_database.test2 to $user");
-user_query("create table $opt_database.test2 (a int not null)");
-user_query("delete from $opt_database.test where a=2",1);
-user_query("delete from $opt_database.test where A=2",1);
-user_query("update test set b=5 where b>0",1);
-user_query("update test,test2 SET test.b=5 where b>0",1);
-
-safe_query("grant update(b),delete on $opt_database.test to $user");
-safe_query("revoke update(a) on $opt_database.test from $user",1);
-user_query("delete from $opt_database.test where a=2",1);
-user_query("update test set b=5 where b>0",1);
-safe_query("grant select(a),select(b) on $opt_database.test to $user");
-user_query("delete from $opt_database.test where a=2");
-user_query("delete from $opt_database.test where A=2");
-user_query("update test set b=5 where b>0");
-user_query("update test set a=11 where b>5",1);
-user_query("update test,test2 SET test.b=5 where b>0",1);
-user_query("update test,test2 SET test.a=11 where b>0",1);
-user_query("update test,test2 SET test.b=test2.a where b>0",1);
-user_query("update test,test2 SET test.b=11 where test2.a>0",1);
-user_query("select a,A from test");
-
-safe_query("select $tables_cols from mysql.tables_priv");
-safe_query("revoke ALL PRIVILEGES on $opt_database.test from $user");
-safe_query("select $tables_cols from mysql.tables_priv");
-safe_query("revoke GRANT OPTION on $opt_database.test from $user",1);
-safe_query("drop table $opt_database.test2");
-safe_query("revoke create,update on $opt_database.test2 from $user");
-
-#
-# Test grants on database level
-#
-
-safe_query("grant select(a) on $opt_database.test to $user");
-user_query("show full columns from test");
-safe_query("grant insert (b), update (b) on $opt_database.test to $user");
-
-user_query("select count(a) from test");
-user_query("select count(skr.a) from test as skr");
-user_query("select count(a) from test where a > 5");
-user_query("insert into test (b) values (5)");
-user_query("insert into test (b) values (a)");
-user_query("update test set b=3 where a > 0");
-
-user_query("select * from test",1);
-user_query("select b from test",1);
-user_query("select a from test where b > 0",1);
-user_query("insert into test (a) values (10)",1);
-user_query("insert into test (b) values (b)",1);
-user_query("insert into test (a,b) values (1,5)",1);
-user_query("insert into test (b) values (1),(b)",1);
-user_query("update test set b=3 where b > 0",1);
-
-safe_query("select $tables_cols from mysql.tables_priv");
-safe_query("select $columns_cols from mysql.columns_priv");
-safe_query("revoke select(a), update (b) on $opt_database.test from $user");
-safe_query("select $tables_cols from mysql.tables_priv");
-safe_query("select $columns_cols from mysql.columns_priv");
-
-user_query("select count(a) from test",1);
-user_query("update test set b=4",1);
-
-safe_query("grant select(a,b), update (a,b) on $opt_database.test to $user");
-user_query("select count(a),count(b) from test where a+b > 0");
-user_query("insert into test (b) values (9)");
-user_query("update test set b=6 where b > 0");
-
-safe_query("flush privileges"); # Test restoring privileges from disk
-safe_query("select $tables_cols from mysql.tables_priv");
-safe_query("select $columns_cols from mysql.columns_priv");
-
-# Try mixing of table and database privileges
-
-user_query("insert into test (a,b) values (12,12)",1);
-safe_query("grant insert on $opt_database.* to $user");
-user_connect(0);
-user_query("insert into test (a,b) values (13,13)");
-
-# This grants and revokes SELECT on different levels.
-safe_query("revoke select(b) on $opt_database.test from $user");
-user_query("select count(a) from test where a+b > 0",1);
-user_query("update test set b=5 where a=2");
-safe_query("grant select on $opt_database.test to $user");
-user_connect(0);
-user_query("select count(a) from test where a+b > 0");
-safe_query("revoke select(b) on $opt_database.test from $user");
-user_query("select count(a) from test where a+b > 0");
-safe_query("revoke select on $opt_database.test from $user");
-user_connect(0);
-user_query("select count(a) from test where a+b > 0",1);
-safe_query("grant select(a) on $opt_database.test to $user");
-user_query("select count(a) from test where a+b > 0",1);
-safe_query("grant select on *.* to $user");
-user_connect(0);
-user_query("select count(a) from test where a+b > 0");
-safe_query("revoke select on *.* from $user");
-safe_query("grant select(b) on $opt_database.test to $user");
-user_connect(0);
-user_query("select count(a) from test where a+b > 0");
-
-
-safe_query("select * from mysql.db where user = '$opt_user'");
-safe_query("select $tables_cols from mysql.tables_priv where user = '$opt_user'");
-safe_query("select $columns_cols from mysql.columns_priv where user = '$opt_user'");
-
-safe_query("revoke ALL PRIVILEGES on $opt_database.test from $user");
-user_query("select count(a) from test",1);
-user_query("select * from mysql.user order by hostname",1);
-safe_query("select * from mysql.db where user = '$opt_user'");
-safe_query("select $tables_cols from mysql.tables_priv where user = '$opt_user'");
-safe_query("select $columns_cols from mysql.columns_priv where user = '$opt_user'");
-
-#
-# Clear up privileges to make future tests easier
-
-safe_query("delete from user where user='$opt_user'");
-safe_query("delete from db where user='$opt_user'");
-safe_query("flush privileges");
-safe_query("show grants for $user",1);
-
-#
-# Test IDENTIFIED BY
-#
-
-safe_query("grant ALL PRIVILEGES on $opt_database.test to $user identified by 'dummy', ${opt_user}\@127.0.0.1 identified by 'dummy2'");
-user_connect(0,"dummy");
-safe_query("grant SELECT on $opt_database.* to $user identified by ''");
-user_connect(0);
-safe_query("revoke ALL PRIVILEGES on $opt_database.test from $user identified by '', ${opt_user}\@127.0.0.1 identified by 'dummy2'");
-safe_query("revoke ALL PRIVILEGES on $opt_database.* from $user identified by ''");
-
-safe_query("show grants for $user");
-
-#
-# Test bug reported in SELECT INTO OUTFILE
-#
-
-safe_query("create table $opt_database.test3 (a int, b int)");
-safe_query("grant SELECT on $opt_database.test3 to $user");
-safe_query("grant FILE on *.* to $user");
-safe_query("insert into $opt_database.test3 values (1,1)");
-user_connect(0);
-user_query("select * into outfile '$tmp_table' from $opt_database.test3");
-safe_query("revoke SELECT on $opt_database.test3 from $user");
-safe_query("grant SELECT(a) on $opt_database.test3 to $user");
-user_query("select a from $opt_database.test3");
-user_query("select * from $opt_database.test3",1);
-user_query("select a,b from $opt_database.test3",1);
-user_query("select b from $opt_database.test3",1);
-
-safe_query("revoke SELECT(a) on $opt_database.test3 from $user");
-safe_query("revoke FILE on *.* from $user");
-safe_query("drop table $opt_database.test3");
-
-#
-# Test privileges needed for LOCK TABLES
-#
-
-safe_query("create table $opt_database.test3 (a int)");
-user_connect(1);
-safe_query("grant INSERT on $opt_database.test3 to $user");
-user_connect(0);
-user_query("select * into outfile '$tmp_table' from $opt_database.test3",1);
-safe_query("grant SELECT on $opt_database.test3 to $user");
-user_connect(0);
-user_query("LOCK TABLES $opt_database.test3 READ",1);
-safe_query("grant LOCK TABLES on *.* to $user");
-safe_query("show grants for $user");
-safe_query("select * from mysql.user where user='$opt_user'");
-user_connect(0);
-user_query("LOCK TABLES $opt_database.test3 READ");
-user_query("UNLOCK TABLES");
-safe_query("revoke SELECT,INSERT,UPDATE,DELETE on $opt_database.test3 from $user");
-user_connect(0);
-safe_query("revoke LOCK TABLES on *.* from $user");
-user_connect(1);
-safe_query("drop table $opt_database.test3");
-
-#
-# test new privileges in 4.0.2
-#
-
-safe_query("show grants for $user");
-safe_query("grant all on *.* to $user WITH MAX_QUERIES_PER_HOUR 1 MAX_UPDATES_PER_HOUR 2 MAX_CONNECTIONS_PER_HOUR 3");
-safe_query("show grants for $user");
-safe_query("revoke LOCK TABLES on *.* from $user");
-safe_query("flush privileges");
-safe_query("show grants for $user");
-safe_query("revoke ALL PRIVILEGES on *.* from $user");
-safe_query("show grants for $user");
-
-#
-# Clean up things
-#
-
-unlink($tmp_table);
-safe_query("drop database $opt_database");
-safe_query("delete from user where user='$opt_user'");
-safe_query("delete from db where user='$opt_user'");
-safe_query("delete from tables_priv");
-safe_query("delete from columns_priv");
-safe_query("flush privileges");
-
-print "end of test\n";
-exit 0;
-
-sub usage
-{
- print <<EOF;
-$0 Ver $version
-
-This program tests that the GRANT commands works by creating a temporary
-database ($opt_database) and user ($opt_user).
-
-Options:
-
---database (Default $opt_database)
- In which database the test tables are created.
-
---force
- Don''t ask any question before starting this test.
-
---host='host name' (Default $opt_host)
- Host name where the database server is located.
-
---Information
---help
- Print this help
-
---password
- Password for root-user.
-
---server='server name' (Default $opt_server)
- Run the test on the given SQL server.
-
---user (Default $opt_user)
- A non-existing user on which we will test the GRANT commands.
-
---verbose
- Write all queries when we are execute them.
-
---root-user='user name' (Default $opt_root_user)
- User with privileges to modify the 'mysql' database.
-EOF
- exit(0);
-}
-
-
-sub print_info
-{
- my $tmp;
- print <<EOF;
-This test will clear your table and column grant table and recreate the
-$opt_database database !
-All privileges for $user will be destroyed !
-
-Don\'t run this test if you have done any GRANT commands that you want to keep!
-EOF
- for (;;)
- {
- print "Start test (yes/no) ? ";
- $tmp=<STDIN>; chomp($tmp); $tmp=lc($tmp);
- last if ($tmp =~ /^yes$/i);
- exit 1 if ($tmp =~ /^n/i);
- print "\n";
- }
-}
-
-
-sub user_connect
-{
- my ($ignore_error,$password)=@_;
- $password="" if (!defined($password));
-
- print "Connecting $opt_user\n" if ($opt_verbose);
- $user_dbh->disconnect if (defined($user_dbh));
-
- $user_dbh=DBI->connect("DBI:mysql:$opt_database:$opt_host",$opt_user,
- $password, { PrintError => 0});
- if (!$user_dbh)
- {
- if ($opt_verbose || !$ignore_error)
- {
- print "Error on connect: $DBI::errstr\n";
- }
- if (!$ignore_error)
- {
- die "The above should not have failed!";
- }
- }
- elsif ($ignore_error)
- {
- die "Connect succeeded when it shouldn't have !\n";
- }
-}
-
-sub safe_query
-{
- my ($query,$ignore_error)=@_;
- if (do_query($dbh,$query, $ignore_error))
- {
- if (!defined($ignore_error))
- {
- die "The above should not have failed!";
- }
- }
- elsif (defined($ignore_error) && $ignore_error == 1)
- {
- die "Query '$query' succeeded when it shouldn't have !\n";
- }
-}
-
-
-sub user_query
-{
- my ($query,$ignore_error)=@_;
- if (do_query($user_dbh,$query, $ignore_error))
- {
- if (!defined($ignore_error))
- {
- die "Query '$query' should not have failed!";
- }
- }
- elsif (defined($ignore_error) && $ignore_error == 1)
- {
- die "Query '$query' succeeded when it shouldn't have !\n";
- }
-}
-
-
-sub do_query
-{
- my ($my_dbh, $query, $ignore_error)=@_;
- my ($sth, $row, $tab, $col, $found, $fatal_error);
-
- print "$query\n" if ($opt_debug || $opt_verbose);
- if (!($sth= $my_dbh->prepare($query)))
- {
- print "Error in prepare: $DBI::errstr\n";
- return 1;
- }
- if (!$sth->execute)
- {
- $fatal_error= ($DBI::errstr =~ /parse error/);
- if (!$ignore_error || ($opt_verbose && $ignore_error != 3) || $fatal_error)
- {
- print "Error in execute: $DBI::errstr\n";
- }
- die if ($fatal_error);
- $sth->finish;
- return 1;
- }
- $found=0;
- if (!$opt_silent)
- {
- while (($row=$sth->fetchrow_arrayref))
- {
- $found=1;
- $tab="";
- foreach $col (@$row)
- {
- print $tab;
- print defined($col) ? $col : "NULL";
- $tab="\t";
- }
- print "\n";
- }
- print "\n" if ($found);
- }
- $sth->finish;
- return 0;
-}
diff --git a/unittest/mysys/stacktrace-t.c b/unittest/mysys/stacktrace-t.c
index 8fa0db15b36..d8408f80d76 100644
--- a/unittest/mysys/stacktrace-t.c
+++ b/unittest/mysys/stacktrace-t.c
@@ -29,6 +29,7 @@ void test_my_safe_print_str()
memcpy(b_stack, "LEGAL", 6);
memcpy(b_bss, "LEGAL", 6);
+#ifdef HAVE_STACKTRACE
#ifndef __SANITIZE_ADDRESS__
fprintf(stderr, "\n===== stack =====\n");
my_safe_print_str(b_stack, 65535);
@@ -48,6 +49,7 @@ void test_my_safe_print_str()
fprintf(stderr, "\n===== (const char*) 1 =====\n");
my_safe_print_str((const char*)1, 5);
#endif /*__SANITIZE_ADDRESS__*/
+#endif /*HAVE_STACKTRACE*/
free(b_heap);
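
The guard added above nests the existing AddressSanitizer exclusion inside a HAVE_STACKTRACE check, presumably because the probes are only meaningful when stacktrace support is compiled in and are unsafe under ASan. A minimal standalone sketch of the same nested-guard pattern, assuming a build-system-provided HAVE_STACKTRACE macro (the function and buffer names below are illustrative, not the unit test's code):

#include <stdio.h>
#include <string.h>

static void probe(const char *p, size_t len)
{
#if defined(HAVE_STACKTRACE) && !defined(__SANITIZE_ADDRESS__)
  /* Probes raw memory byte by byte; compiled only when stacktrace
     support exists and AddressSanitizer is not instrumenting the build. */
  size_t i;
  for (i= 0; i < len; i++)
    fputc(p[i] ? p[i] : '.', stderr);
  fputc('\n', stderr);
#else
  (void) p;
  (void) len;                      /* probing disabled in this build */
#endif
}

int main(void)
{
  char b_stack[16];
  memcpy(b_stack, "LEGAL", 6);
  probe(b_stack, sizeof b_stack);  /* prints trailing bytes as '.' if enabled */
  return 0;
}
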
diff --git a/vio/viossl.c b/vio/viossl.c
index a5b3396953e..c886391a593 100644
--- a/vio/viossl.c
+++ b/vio/viossl.c
@@ -85,18 +85,35 @@ static void ssl_set_sys_error(int ssl_error)
@param vio VIO object representing a SSL connection.
@param ret Value returned by a SSL I/O function.
@param event[out] The type of I/O event to wait/retry.
+ @param should_wait[out] Whether to wait for 'event'.
@return Whether a SSL I/O operation should be deferred.
@retval TRUE Temporary failure, retry operation.
@retval FALSE Indeterminate failure.
*/
-static my_bool ssl_should_retry(Vio *vio, int ret, enum enum_vio_io_event *event)
+static my_bool ssl_should_retry(Vio *vio, int ret, enum enum_vio_io_event *event, my_bool *should_wait)
{
int ssl_error;
SSL *ssl= vio->ssl_arg;
my_bool should_retry= TRUE;
+#if defined(ERR_LIB_X509) && defined(X509_R_CERT_ALREADY_IN_HASH_TABLE)
+ /*
+ Ignore error X509_R_CERT_ALREADY_IN_HASH_TABLE.
+ This is a workaround for a bug in older (< 1.1.1)
+ OpenSSL versions.
+ */
+ unsigned long err = ERR_peek_error();
+ if (ERR_GET_LIB(err) == ERR_LIB_X509 &&
+ ERR_GET_REASON(err) == X509_R_CERT_ALREADY_IN_HASH_TABLE)
+ {
+ ERR_clear_error();
+ *should_wait= FALSE;
+ return TRUE;
+ }
+#endif
+
/* Retrieve the result for the SSL I/O operation. */
ssl_error= SSL_get_error(ssl, ret);
@@ -105,12 +122,15 @@ static my_bool ssl_should_retry(Vio *vio, int ret, enum enum_vio_io_event *event
{
case SSL_ERROR_WANT_READ:
*event= VIO_IO_EVENT_READ;
+ *should_wait= TRUE;
break;
case SSL_ERROR_WANT_WRITE:
*event= VIO_IO_EVENT_WRITE;
+ *should_wait= TRUE;
break;
default:
should_retry= FALSE;
+ *should_wait= FALSE;
ssl_set_sys_error(ssl_error);
ERR_clear_error();
break;
@@ -120,6 +140,32 @@ static my_bool ssl_should_retry(Vio *vio, int ret, enum enum_vio_io_event *event
}
+/**
+ Handle an SSL I/O error.
+
+ @param[in] vio Vio
+ @param[in] ret return value from the failed I/O operation
+
+ @return 0 - should retry the last read/write operation
+ 1 - some error has occurred
+*/
+static int handle_ssl_io_error(Vio *vio, int ret)
+{
+ enum enum_vio_io_event event;
+ my_bool should_wait;
+
+ /* Process the SSL I/O error. */
+ if (!ssl_should_retry(vio, ret, &event, &should_wait))
+ return 1;
+
+ if (!should_wait)
+ return 1;
+
+ /* Attempt to wait for an I/O event. */
+ return vio_socket_io_wait(vio, event);
+}
+
+
size_t vio_ssl_read(Vio *vio, uchar *buf, size_t size)
{
int ret;
@@ -135,13 +181,7 @@ size_t vio_ssl_read(Vio *vio, uchar *buf, size_t size)
{
while ((ret= SSL_read(ssl, buf, (int)size)) < 0)
{
- enum enum_vio_io_event event;
-
- /* Process the SSL I/O error. */
- if (!ssl_should_retry(vio, ret, &event))
- break;
- /* Attempt to wait for an I/O event. */
- if (vio_socket_io_wait(vio, event))
+ if (handle_ssl_io_error(vio,ret))
break;
}
}
@@ -168,14 +208,7 @@ size_t vio_ssl_write(Vio *vio, const uchar *buf, size_t size)
{
while ((ret= SSL_write(ssl, buf, (int)size)) < 0)
{
- enum enum_vio_io_event event;
-
- /* Process the SSL I/O error. */
- if (!ssl_should_retry(vio, ret, &event))
- break;
-
- /* Attempt to wait for an I/O event. */
- if (vio_socket_io_wait(vio, event))
+ if (handle_ssl_io_error(vio,ret))
break;
}
}
@@ -263,14 +296,7 @@ static int ssl_handshake_loop(Vio *vio, SSL *ssl, ssl_handshake_func_t func)
/* Initiate the SSL handshake. */
while ((ret= func(ssl)) < 1)
{
- enum enum_vio_io_event event;
-
- /* Process the SSL I/O error. */
- if (!ssl_should_retry(vio, ret, &event))
- break;
-
- /* Wait for I/O so that the handshake can proceed. */
- if (vio_socket_io_wait(vio, event))
+ if (handle_ssl_io_error(vio,ret))
break;
}
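
After this refactoring the read, write and handshake loops all funnel their error handling through handle_ssl_io_error(): ssl_should_retry() classifies the failure and reports, via the new should_wait out-parameter, whether waiting on the socket is useful, and the helper either waits or tells the caller to stop retrying. A minimal standalone sketch of the same retry pattern against plain OpenSSL and poll(), with MariaDB's vio layer and the X509 workaround left out (all names below are illustrative, not MariaDB's):

#include <poll.h>
#include <openssl/ssl.h>
#include <openssl/err.h>

/* Block until the socket is ready for the requested events; 0 on success. */
static int wait_for_io(int fd, short events)
{
  struct pollfd pfd;
  pfd.fd= fd;
  pfd.events= events;
  pfd.revents= 0;
  return poll(&pfd, 1, -1) == 1 ? 0 : -1;
}

/* Mirrors the patch's convention: 0 = retry the last operation, 1 = give up. */
static int handle_io_error(SSL *ssl, int fd, int ret)
{
  switch (SSL_get_error(ssl, ret))
  {
  case SSL_ERROR_WANT_READ:          /* retry once the socket is readable */
    return wait_for_io(fd, POLLIN) ? 1 : 0;
  case SSL_ERROR_WANT_WRITE:         /* retry once the socket is writable */
    return wait_for_io(fd, POLLOUT) ? 1 : 0;
  default:                           /* fatal: do not wait, do not retry  */
    ERR_clear_error();
    return 1;
  }
}

/* Blocking-style read on a non-blocking SSL connection. */
static int ssl_read_retry(SSL *ssl, int fd, void *buf, int len)
{
  int ret;
  while ((ret= SSL_read(ssl, buf, len)) < 0)
  {
    if (handle_io_error(ssl, fd, ret))
      break;
  }
  return ret;
}

Keeping the wait/no-wait decision inside one helper is what reduces the three call sites to a single "if (handle_ssl_io_error(vio,ret)) break;" and gives special cases such as the X509_R_CERT_ALREADY_IN_HASH_TABLE check a single place to hook in.
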
diff --git a/win/packaging/heidisql.cmake b/win/packaging/heidisql.cmake
index 1d66c812ec0..fab86048adb 100644
--- a/win/packaging/heidisql.cmake
+++ b/win/packaging/heidisql.cmake
@@ -1,4 +1,4 @@
-SET(HEIDISQL_BASE_NAME "HeidiSQL_11.0_32_Portable")
+SET(HEIDISQL_BASE_NAME "HeidiSQL_11.2_32_Portable")
SET(HEIDISQL_ZIP "${HEIDISQL_BASE_NAME}.zip")
SET(HEIDISQL_URL "http://www.heidisql.com/downloads/releases/${HEIDISQL_ZIP}")
SET(HEIDISQL_DOWNLOAD_DIR ${THIRD_PARTY_DOWNLOAD_LOCATION}/${HEIDISQL_BASE_NAME})
diff --git a/wsrep-lib b/wsrep-lib
-Subproject 41a6e9dad79c921134e44cf974b6b7ca3b84e53
+Subproject a93955ddeef5989505cbb3a9f8bb12434146256
diff --git a/zlib/CMakeLists.txt b/zlib/CMakeLists.txt
index 810ec8ae0cb..ee79bc0bd5f 100644
--- a/zlib/CMakeLists.txt
+++ b/zlib/CMakeLists.txt
@@ -13,12 +13,6 @@
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1335 USA
-cmake_minimum_required(VERSION 2.4.4)
-set(CMAKE_ALLOW_LOOSE_LOOP_CONSTRUCTS ON)
-
-project(zlib C)
-
-set(VERSION "1.2.11")
include(CheckTypeSize)
include(CheckFunctionExists)
@@ -110,40 +104,7 @@ set(ZLIB_SRCS
zutil.c
)
-if(NOT MINGW)
- set(ZLIB_DLL_SRCS
- win32/zlib1.rc # If present will override custom build rule below.
- )
-endif()
-
-# parse the full version number from zlib.h and include in ZLIB_FULL_VERSION
-file(READ ${CMAKE_CURRENT_SOURCE_DIR}/zlib.h _zlib_h_contents)
-string(REGEX REPLACE ".*#define[ \t]+ZLIB_VERSION[ \t]+\"([-0-9A-Za-z.]+)\".*"
- "\\1" ZLIB_FULL_VERSION ${_zlib_h_contents})
-
ADD_CONVENIENCE_LIBRARY(zlib STATIC
${ZLIB_SRCS} ${ZLIB_PUBLIC_HDRS} ${ZLIB_PRIVATE_HDRS})
RESTRICT_SYMBOL_EXPORTS(zlib)
-
-if(NOT CYGWIN)
- # This property causes shared libraries on Linux to have the full version
- # encoded into their final filename. We disable this on Cygwin because
- # it causes cygz-${ZLIB_FULL_VERSION}.dll to be created when cygz.dll
- # seems to be the default.
- #
- # This has no effect with MSVC, on that platform the version info for
- # the DLL comes from the resource file win32/zlib1.rc
- set_target_properties(zlib PROPERTIES VERSION ${ZLIB_FULL_VERSION})
-endif()
-
-if(CMAKE_SYSTEM_NAME MATCHES "SunOS")
- # On unix-like platforms the library is almost always called libz
- set_target_properties(zlib PROPERTIES OUTPUT_NAME z)
-elseif(UNIX)
- # On unix-like platforms the library is almost always called libz
- set_target_properties(zlib PROPERTIES OUTPUT_NAME z)
-endif()
-
-
-RESTRICT_SYMBOL_EXPORTS(zlib)